Dataset schema (one line per column: name, dtype, observed range; ⌀ marks a nullable column):
repo_name: string (length 7 to 111)
__id__: int64 (16.6k to 19,705B)
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 5 to 151)
content_id: string (length 40)
detected_licenses: list
license_type: string (2 classes)
repo_url: string (length 26 to 130)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4 to 42)
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64 (14.6k to 687M, ⌀)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (12 classes)
gha_fork: bool (2 classes)
gha_event_created_at: timestamp[ns]
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_size: int64 (0 to 10.2M, ⌀)
gha_stargazers_count: int32 (0 to 178k, ⌀)
gha_forks_count: int32 (0 to 88.9k, ⌀)
gha_open_issues_count: int32 (0 to 2.72k, ⌀)
gha_language: string (length 1 to 16, ⌀)
gha_archived: bool (1 class)
gha_disabled: bool (1 class)
content: string (length 10 to 2.95M)
src_encoding: string (5 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (10 to 2.95M)
extension: string (19 classes)
num_repo_files: int64 (1 to 202k)
filename: string (length 4 to 112)
num_lang_files: int64 (1 to 202k)
alphanum_fraction: float64 (0.26 to 0.89)
alpha_fraction: float64 (0.2 to 0.89)
hex_fraction: float64 (0 to 0.09)
num_lines: int32 (1 to 93.6k)
avg_line_length: float64 (4.57 to 103)
max_line_length: int64 (7 to 931)
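
Rows with this schema can be inspected with a few lines of pandas (a sketch; the
parquet file name is an assumption, since the dump does not say how the rows are
stored):

import pandas as pd
df = pd.read_parquet("rows.parquet")  # any file holding rows of this schema
print(df[["repo_name", "path", "language", "num_lines"]].head())

The rows themselves follow, with column values separated by "|":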
Rohit-dev-coder/MyPythonCodes | 7,834,020,349,528 | fe2cf78ce9e1aeea0eff879d0b0cb72927043731 | 7de2bc9c439bb14f45840dbe9535cb4c65b87320 | /upperorlowerletter.py | f7282e4a3993d273f6aa33b9b211962d5ad3086c | []
| no_license | https://github.com/Rohit-dev-coder/MyPythonCodes | 51899330ddec120abd3340da4130ac375b74ba25 | 986fb8d9f40a55d2718563fb2ece3088f49f813b | refs/heads/master | 2020-06-29T08:11:21.971783 | 2019-08-04T11:00:04 | 2019-08-04T11:00:04 | 200,481,704 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ch = input("Enter a Character : ")
ch = ch[0]
if "A" <= ch <="Z":
print("Character is Upper")
elif "0" <= ch <= "9":
print("Character is Number")
else:
print("Character is lower")
| UTF-8 | Python | false | false | 189 | py | 43 | upperorlowerletter.py | 42 | 0.57672 | 0.560847 | 0 | 8 | 22.625 | 34 |
carriercomm/Trieste | 17,841,294,180,890 | 9bd60d09008239bb428329e8e5ea94f3e86c2e35 | ad4decd93a1baaf700eaee3f4a62085e30765c84 | /Trieste/vice/vicedata.py | 65b56a281768be8a73f4a7be4769de9635c5d60b | []
| no_license | https://github.com/carriercomm/Trieste | ca666a3cee249f247cc26dcb39d2ce738a394e2f | 5891cc1c02f4df9b460c00965c0c5f93f7734d3f | refs/heads/master | 2018-02-06T15:36:22.411758 | 2016-02-24T18:10:04 | 2016-02-24T18:10:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ###########################################################################
# Copyright (C) 2003 by Marcos Dione
# <mdione@grulic.org.ar>
#
# Copyright: See COPYING file that comes with this distribution
#
###########################################################################
from bsddb import hashopen
from threading import RLock
from errno import *
from os.path import dirname
from os import stat, makedirs, unlink
from Trieste.common.utils import csvPrettyPrint, csvParse
from Trieste.common.monitor import Monitor, CLock
from Trieste.common.object import Object
from Trieste.common import consts
class ViceData (Object):
"""
    holds three hashes: one for data, another for stat, the last for locks
    the first two are really bsddb's
    the data one contains three kinds of entries:
    * entries with key in the form "ino/chunkNo", which are for regular files.
      each file is broken in chunks of the same size.
    * entries with key in the form "ino/fileName", which are for directories.
      that entry means that the dir w/ inode number ino has a link to another ino with filename fileName.
      the value of such an entry is the file's ino.
    * entries with key in the form "ino", which are also for directories.
      these ones contain a csv string of the filenames.
      this one is needed because we need to get the listing of the dir,
      and it's easier than iterating over the keys of a bsddb.
"""
def __init__ (self, master, path):
Object.__init__ (self)
self.master= master
dir= dirname (path)
try:
stat (dir)
except OSError:
makedirs (dir)
self.path= path
self.data= None
self.meta= None
self.locks= None
self.mkDB ()
# master lock. global actions (like sync and emp) must acquire this one.
self.masterLock= RLock ()
def mkDB (self):
path= self.path
self.debug (1, "opening %s-data.bsddb" % path)
self.data= Monitor (hashopen ("%s-data.bsddb" % path, 'c'), 'data')
self.debug (1, "opening %s-metadata.bsddb" % path)
self.meta= Monitor (hashopen ("%s-metadata.bsddb" % path, 'c'), 'meta')
        # per inode locks. any action over an inode must acquire its lock first.
self.locks= Monitor ({}, 'locks')
def rmDB (self):
self.data= None
self.meta= None
self.locks= None
path= self.path
unlink ("%s-data.bsddb" % path)
unlink ("%s-metadata.bsddb" % path)
def hasInode (self, ino):
# self.debug (1, 'looking for inode %s' % ino)
self.meta.acquire ()
ans= self.meta.has_key (str(ino))
self.meta.release ()
return ans
def stat (self, ino, stat=None):
if stat:
# self.debug (2, 'setting stat')
self.meta[str(ino)]= str(stat)
else:
stat= eval (self.meta[str(ino)])
# TODO: so bark if it's not right
return stat
def keys (self):
self.meta.acquire ()
l= map (lambda x: int(x), self.meta.keys ())
self.meta.release ()
return l
def getValues (self, key):
# needed for master.giveData()
# this way a vice gives his url as the data (to be added [to the list that it's there])
# but it's a very ugly generalization
return [self.master.url ()]
#########
# file ops
#########
def size (self, ino, size=None):
stat= self.stat (ino)
if size is not None:
stat[consts.statSize]= size
self.stat (ino, stat)
return stat[consts.statSize]
def pages (self, ino):
"""
        How many pages ino has allocated.
"""
# for size==0 return 0
# for sizes E (1..pageSize) return 1
# and so forth
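        # e.g. with pageSize = 4096: size 0 -> 0 pages, 4096 -> 1, 4097 -> 2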
size= self.size (ino)
pages= (size+consts.pageSize-1)/consts.pageSize
# self.debug (2, "pages: %d, size: %d" % (pages, size))
return pages
def read (self, ino, off=0, size=None):
# profiling!
# return "\0"*size
# lot of things:
# first, if data is spread in several pages, read them
# second, what if @ eof? None
if size is None:
size= self.size (ino)
# self.debug (2, "reading @ %d, size %d" % (off, size))
data= ''
bytesToRead= size
# ugh
bytesRead= size
try:
while bytesToRead and bytesRead:
(page, start)= divmod (off, consts.pageSize)
end= min (consts.pageSize, start+bytesToRead)
pageData= self.data["%d/%d" % (ino, page)]
readData= pageData[start:end]
bytesRead= len (readData)
# self.debug (2, "off: %d, chunk: %d, chunkOff(CO): %d, max: %d, bytesRead: %d, bytesTR: %d, pageSz-CO: %d, CO+bytesTR: %d" %
# # (off, page, start, end, bytesRead, bytesToRead, consts.pageSize-start, start+bytesToRead))
data+= readData
bytesToRead-= bytesRead
off+= bytesRead
# self.debug (2, "off: %d, chunk: %d, chunkOff(CO): %d, max: %d, bytesRead: %d, bytesTR: %d, pageSz-CO: %d, CO+bytesTR: %d\n" %
# (off, page, start, end, bytesRead, bytesToRead, consts.pageSize-start, start+bytesToRead))
except KeyError:
# means EOF?
pass
return data
def trunc (self, ino, size=0, shrink=True):
# self.debug (2, 'trunc\'ing', 2)
fileSize= self.size (ino)
if size==fileSize:
# self.debug (2, 'already there (%d<->%d); bailing out' % (size, fileSize))
return size
# check if we need to create previous pages
lastPage= self.pages (ino)
(page, end)= divmod (size, consts.pageSize)
if page>lastPage:
# self.debug (1, 'expanding')
# ... data |
# ... | lastPage | ... | page |
# ^ end
# first add '0's to last page
try:
pageData= self.data["%d/%d" % (ino, lastPage)]
except KeyError:
self.master.fblocks-= 1
if self.master.fblocks==0:
return -ENOSPC
pageData= ''
# decrease free count
pageLen= len (pageData)
if pageLen<consts.pageSize:
# self.debug (1, 'filling up lastPage: %d-> %d' % (pageLen, consts.pageSize))
pageData+= "\0"*(consts.pageSize-len (pageData))
self.data["%d/%d" % (ino, lastPage)]= pageData
# ... data |0000|
# ... | lastPage | ... | page |
# ^ end
# now fill the pages gap, starting from the next one to the last
            # till the previous one to the wanted page
i= lastPage+1
while i<page:
self.master.fblocks-= 1
if self.master.fblocks==0:
return -ENOSPC
# self.debug (1, 'added page %d' % i)
self.data["%d/%d" % (ino, i)]= "\0"*consts.pageSize
# decrease free count
i+= 1
# ... data |000000...0|
# ... | lastPage | ... | page |
# ^ end
# decrease free count
self.master.fblocks-= 1
# self.debug (1, 'filling up page: -> %d' % end)
self.data["%d/%d" % (ino, page)]= "\0"*end
elif page<=lastPage:
# ... data |
# ... | page | ... | lastPage |
# ^ end
if shrink:
# self.debug (1, 'shrinking')
i= lastPage
while i>page:
try:
del self.data["%d/%d" % (ino, i)]
# self.debug (1, 'del\'ed page %d' % i)
except KeyError:
# self.debug (1, 'page %d not present for deletion' % i)
pass
# increase free count
self.master.fblocks+= 1
i-= 1
# self.debug (1, 'done sh\'k\'n')
try:
pageData= self.data["%d/%d" % (ino, page)]
except KeyError:
pageData= ''
# decrease free count
self.master.fblocks-= 1
pageLen= len(pageData)
if pageLen>end and shrink:
# ... data |
# ... | page | ... | lastPage |
# ^ end
pageData= pageData[:end]
else:
# ... data |
# ... | page | ... | lastPage |
# ^ end
pageData+= "\0"*(end-pageLen)
# self.debug (1, 'somehting\'ing page: %d-> %d' % (pageLen, end))
self.data["%d/%d" % (ino, page)]= pageData
# ... data |
# ... | page |
# ^ end
# modify size
if (shrink and size<fileSize) or size>fileSize:
# self.debug (1, 'change size: %d-> %d' % (fileSize, size))
self.size (ino, size)
return size
def write (self, ino, off, data):
# profiling
# return len(data)
# self.debug (2, "writing in %d @ %d, size %d" % (ino, off, len(data)))
bytesToWrite= len (data)
totalBytesWrote= 0
self.trunc (ino, off, False)
while bytesToWrite:
# start end
# | |
# v v
# ... | page | ...
(page, start)= divmod (off, consts.pageSize)
end= min (consts.pageSize, start+bytesToWrite)
# self.debug (2, "o %d; btw %d; p %d[%d..%d]" % (off, bytesToWrite, page, start, end))
# get the page we'll be writing to
try:
pageData= self.data["%d/%d" % (ino, page)]
except KeyError:
# decrease free count
self.master.fblocks-= 1
if self.master.fblocks==0:
return -ENOSPC
# self.debug (2, 'new page %d' % page)
pageData= ''
pageLen= len(pageData)
# write
bytesWrote= end-start
# self.debug (2, ">%s<" % pageData)
# self.debug (2, "page: %d, start: %d, bytesToWrite: %d, bytesWrote: %d, end: %d, page[->]: >%s<, data[-]: >%s<, page[<-]: >%s<" %
# (page, start, bytesToWrite, bytesWrote, end, pageData[:start], data[:bytesWrote], pageData[start+bytesWrote:pageLen]))
pageData= pageData[:start]+data[:bytesWrote]+pageData[start+bytesWrote:pageLen]
# self.debug (2, ">%s<" % pageData)
self.data["%d/%d" % (ino, page)]= pageData
# adjust indexes and remaining data
data= data[bytesWrote:]
bytesToWrite-= bytesWrote
totalBytesWrote+= bytesWrote
off+= bytesWrote
# update ino size
fileSize= self.size (ino)
if off>fileSize:
fileSize= self.size (ino, off)
# for equalization in tests w/ adversaries
# self.data.sync ()
# self.meta.sync ()
return totalBytesWrote
def lock (self, ino, creating=False):
error= -ENOENT
# self.locks.acquire ()
# if not self.locks.has_key (ino):
# create lock
# self.locks[ino]= CLock ()
# get lock on ino
# self.locks[ino].acquire ()
exists= self.hasInode (ino)
if (exists and not creating) or (not exists and creating):
error= 0
else:
if exists and creating:
error= -EEXIST
# else -ENOENT
# self.unlock (ino)
# self.locks.release ()
return error
def unlock (self, ino):
error= -ENOENT
if self.hasInode (ino):
error= 0
# self.locks.acquire ()
# count= self.locks[ino].release ()
# if not count:
# del self.locks[ino]
# self.locks.release ()
return error
def sync (self):
self.masterLock.acquire ()
self.debug (1, 'sync')
self.data.acquire ()
self.data.sync ()
self.data.release ()
self.meta.acquire ()
self.meta.sync ()
self.meta.release ()
self.masterLock.release ()
#############
# dir methods
#############
def mkdir (self, ino, parent):
# decrease free counts
self.master.fblocks-= 1
self.dirContents (ino, [])
# completeness
self.link (ino, '.', ino, True, False)
# link already adds the name to the list above
self.link (ino, '..', parent, True, False)
def rmdir (self, ino):
error= -ENOTEMPTY
# delete both ino and all ino/child... in reverse order, obv.
children= self.dirContents (ino)
if len (children)==2:
for child in children:
key= "%d/%s" % (ino, child)
del self.data[key]
# now ino...
del self.data["%d" % ino]
# and metadata
del self.meta["%d" % ino]
# dec file count
self.master.ufiles-= 1
# inc free counts
self.master.fblocks+= 1
def link (self, dirIno, fileName, fileIno, inc, over):
self.debug (1, 'linking %d:%s:%d' % (dirIno, fileName, fileIno))
error= -EEXIST
key= "%d/%s" % (dirIno, fileName)
if not self.data.has_key (key) or over:
            # add it if it's not there
list= self.dirContents (dirIno)
list.append (fileName)
self.dirContents (dirIno, list)
self.data[key]= str(fileIno)
if inc:
# inc link count
stat= self.stat (dirIno)
stat[consts.statNlink]+= 1
stat= self.stat (dirIno, stat)
error= 0
return error
def unlink (self, dirIno, fileName, dec):
error= -ENOENT
key= "%d/%s" % (dirIno, fileName)
if self.data.has_key (key):
# bye bye
del self.data[key]
# and remove it from the list
list= self.dirContents (dirIno)
# no checks; the try catches it
try:
list.remove (fileName)
self.dirContents (dirIno, list)
if dec:
# dec link count
stat= self.stat (dirIno)
stat[consts.statNlink]-= 1
stat= self.stat (dirIno, stat)
error= 0
except:
# ENOENT
pass
return error
def lookup (self, dirIno, fileName):
"""
if there's an entry w/ that name, return the inode
"""
key= "%d/%s" % (dirIno, fileName)
if self.data.has_key (key):
ans= int(self.data[key])
else:
ans= None
return ans
def dirContents (self, ino, contents=None):
if not contents==None:
self.data["%d" % ino]= csvPrettyPrint (contents)
ans= contents
else:
ans= csvParse (self.data["%d" % ino])
return ans
#######
# misc
#######
def emp (self):
self.masterLock.acquire ()
self.data.close ()
self.data.release ()
self.meta.close ()
self.meta.release ()
self.rmDB ()
self.mkDB ()
self.data.acquire ()
self.meta.acquire ()
self.masterLock.release ()
def fragments (self, ino):
"""
        How many fragments ino has allocated.
"""
# for size==0 return 0
# for sizes E (1..pageSize) return 1
# and so forth
size= self.size (ino)
fragments= (size+consts.fragmentSize-1)/consts.fragmentSize
self.debug (2, "fragments: %d, size: %d" % (fragments, size))
return fragments
def usedBlocksAndFiles (self):
files= 0
blocks= 0
self.meta.acquire ()
# sometimes I just hate exceptions...
try:
key= self.meta.first ()[0]
except KeyError:
key= None
while key and not self.master._terminate:
self.debug (1, 'found key %s' % key)
files+= 1
blocks+= self.fragments (key)
try:
                key= self.meta.next ()[0]
except KeyError:
key= None
self.meta.release ()
return (blocks, files)
| UTF-8 | Python | false | false | 14,734 | py | 37 | vicedata.py | 34 | 0.558708 | 0.552464 | 0 | 495 | 28.765657 | 143 |
niezongxia/personal | 18,090,402,275,959 | 7b9bbbd859ee869cf138338ec432341ac235b55f | 01cfe7cd0f1d24e1224f52b15cf86f223b52692d | /python/code/wait/read_excel.py | ef28a7202b54905ed44c7de1352a03f93be374f2 | []
| no_license | https://github.com/niezongxia/personal | a782397068bc2be6cae5e4cac0a0ee9ff4927443 | ff70eb1dba1cc8ac47b863e55533902bec1604e1 | refs/heads/master | 2022-10-02T14:36:20.163143 | 2022-08-27T14:09:25 | 2022-08-27T14:09:25 | 143,152,875 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #read_excel.py
import xlrd
fname="C:/Users/NZX/Desktop/test.xls"
rbook=xlrd.open_workbook(fname)
#row_value=xls_sheet.row_values(num-1)
#cow_value=xls_sheet.col_values(num-1)
sheet=rbook.sheets()[0]
rows=sheet.nrows  # number of rows already in the sheet
cols=sheet.ncols  # number of columns already in the sheet
for row in range(rows):
    value=sheet.cell_value(row,1)
    print(value)
| UTF-8 | Python | false | false | 347 | py | 25 | read_excel.py | 25 | 0.718266 | 0.702786 | 0 | 19 | 16 | 38 |
KouheiFurukawa/atcoder-back-number | 5,093,831,240,944 | 25b13fe4b92ae9c83a8f227eb508164e2e4a8a39 | b27b0bcd22973d029688ea3f10c9abfe651800a4 | /abc137/abc_137_c.py | 8c35b7c06b10186c08102d4a0a323e50017c0777 | []
| no_license | https://github.com/KouheiFurukawa/atcoder-back-number | 8f6b7c6704bc6fb32bb8ca9ccc9971650c0fdf8f | 2fe11830614bf2ce64ee7947734043fe47800277 | refs/heads/master | 2020-06-03T13:45:54.531015 | 2020-05-17T05:53:39 | 2020-05-17T05:53:39 | 191,591,257 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sys import stdin
N = int(stdin.readline().rstrip())
S = [stdin.readline().rstrip() for _ in range(N)]
count = {}
for s in S:
s = ''.join(sorted(s))
if s not in count:
count[s] = 1
else:
count[s] += 1
ans = 0
for c in count:
if count[c] > 1:
        ans += count[c] * (count[c] - 1) // 2  # C(n, 2) pairs within each anagram group
print(ans)
| UTF-8 | Python | false | false | 339 | py | 237 | abc_137_c.py | 235 | 0.519174 | 0.501475 | 0 | 19 | 16.842105 | 49 |
olivercalder/scherzi | 12,034,498,374,276 | 45567a122cc837e49dc26ee2bb74300de368f8fe | 32a8b94476ab9fcb4c3896ebd1ac050d5389ab52 | /44.py | 27af49538181cd34a190bc5249829326c23e3670 | []
| no_license | https://github.com/olivercalder/scherzi | abf408c27a48dc3122b451768de76b937f2f0886 | 5a1fb78bf024d5176c0e100a818cc7205b135281 | refs/heads/master | 2022-05-02T17:12:26.138819 | 2022-04-26T04:00:26 | 2022-04-26T04:00:26 | 212,889,121 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def add_link(path, n):
if len(path) == 2*n:
return [path]
new_paths = []
if path.count('u') < n:
u_path = path.copy()
u_path.append('u')
new_paths += add_link(u_path, n)
if path.count('r') < n:
r_path = path.copy()
r_path.append('r')
new_paths += add_link(r_path, n)
return new_paths
def count_above(path):
total = 0
for i in range(len(path)):
link = path[i]
if link == 'u' and path[:i].count('u') >= (i+1)//2:
total += 1
return total
def flip_path(path, u_num):
    # find the index just past the u_num-th 'u'
    i = 0
    count = 0
    while count < u_num:
        if path[i] == 'u':
            count += 1
        i += 1
    # assumed intent: reflect every step after that point (u <-> r),
    # the standard reflection bijection on lattice paths
    return path[:i] + ['u' if c == 'r' else 'r' for c in path[i:]]
path = []
n3_paths = add_link(path, 3)
print(len(n3_paths))
for p in n3_paths:
print(''.join(p))
print(count_above(p))
| UTF-8 | Python | false | false | 823 | py | 49 | 44.py | 41 | 0.487242 | 0.471446 | 0 | 36 | 21.861111 | 59 |
adderan/VBS | 5,050,881,548,698 | 0a79c2f38e8dea61f0ffeb0b962ce57318e71aaa | a598a793998408416107615717eafa96c4efcd18 | /DelphesResolution/scripts/res_formula.py | 22d0df13ce7ec359f64fb67fee340d2ffc8aa67d | []
| no_license | https://github.com/adderan/VBS | f1bce5ed36eb65fdcbe5778746752ef6b3cb6157 | 85ae92d48797f4f3d91e14345f788a9cfb200cbc | refs/heads/master | 2016-02-24T17:54:51.557152 | 2015-02-24T06:15:55 | 2015-02-24T06:15:55 | 26,232,911 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math, formula_reader
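import argparse

# minimal setup for the "args.parameters" file handle used in jet_res (an
# assumed invocation, e.g.: python res_formula.py --parameters jets.txt)
parser = argparse.ArgumentParser()
parser.add_argument('--parameters', type=argparse.FileType('r'),
                    help='jet resolution parameter file, one eta region per line')
args, _ = parser.parse_known_args()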
def jet_res(PT, Eta, pileup = 0):
parameters = args.parameters.readlines()
a = None
b = None
S = None
C = None
#print "Eta = " + str(Eta)
for line in parameters:
#print(line)
pars = line.split()
etaregion = float(pars[0])
a = float(pars[1])
b = float(pars[2])
S = float(pars[3])
C = float(pars[4])
if Eta < etaregion:
break
N = a + b*pileup
sigma = math.sqrt((N**2)/(PT**2) + (S**2)/PT + C**2)
return sigma
def sigma_id(PT, eta):
parameters = formula_reader.read_parameters("parameters/idparameters.csv")
eta_step = float(parameters[0][1]) - float(parameters[0][0])
parameter_index = int(eta/eta_step)
eta1, eta2, a1, a2 = parameters[parameter_index]
return math.sqrt(float(a1)*float(a1) + float(a2)*PT*float(a2)*PT)
def sigma_ms(PT, eta):
parameters = [[0.24, 0.02676, 0.00012], [0.0, 0.0388, 0.00016]]
if eta < 1.05:
parameter_index = 0
else: parameter_index = 1
b0, b1, b2 = parameters[parameter_index]
return math.sqrt((float(b0)/PT)**2 + float(b1)**2 + (float(b2)*PT)**2)
def sigma_linear(PT, eta):
parameters = 0.2
return parameters
def sigma_idms(PT, eta):
id_value = sigma_id(PT, eta)
ms_value = sigma_ms(PT, eta)
#print "MS = " + str(ms_value) + " ID = " + str(id_value)
return id_value * PT * ms_value / math.sqrt((PT*id_value)**2 + (PT*ms_value)**2)
| UTF-8 | Python | false | false | 1,344 | py | 9 | res_formula.py | 5 | 0.634673 | 0.584077 | 0 | 45 | 28.822222 | 81 |
iamtrueline/Programmers | 438,086,678,681 | e9f725da941f5c2a3320fbfd863efd31cf019697 | 9eacf21048e9224443d7a7775497adf824f5549b | /Python/조이스틱.py | 53c45f09c6a521e09c24a0238b6e693fb5ba4b1d | []
| no_license | https://github.com/iamtrueline/Programmers | fb0eb29046ba813aa532a35b474ae8349c827e62 | ac19a3999cc46c748593ab9e08f4d14a13579312 | refs/heads/main | 2023-06-26T05:34:44.899022 | 2021-07-30T03:23:02 | 2021-07-30T03:23:02 | 380,900,305 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def solution(name):
    answer = 0
    # up/down presses per position: go forward from 'A' or backward past 'Z'
    change = [min(ord(i)-ord('A'), ord('Z')-ord(i)+1) for i in name]
    n = len(change)
    idx = 0
    while True:
        answer += change[idx]
        change[idx] = 0
        left = 1
        right = 1
        if sum(change) == 0:
            return answer
        # distance to the nearest position that still needs changes, each side
        while change[idx - left] == 0:
            left += 1
        while change[(idx + right) % n] == 0:  # wrap instead of running off the end
            right += 1
        # move the cursor the cheaper way
        answer += min(left, right)
        idx = (idx + (-left if left < right else right)) % n
    return answer
| UTF-8 | Python | false | false | 526 | py | 40 | 조이스틱.py | 39 | 0.452471 | 0.431559 | 0 | 23 | 21.869565 | 68 |
luoxz/GID_Internal | 730,144,482,669 | d864ae640e6a82699c7635c16bfc6ef0dc5b98f9 | 0010f2f71aa04595ecbc84691f330c185f5c7356 | /test/client/test_dev_server.py | 87f883bd62c84d3f91aa028690e9f8f8b9f8a636 | [
"MIT"
]
| permissive | https://github.com/luoxz/GID_Internal | e38be906f255931e6ef2a1a03695ee33db3f804b | 452cca7543d80f31c8f659709d9143ecc1d8eb93 | refs/heads/master | 2021-01-16T00:56:26.411020 | 2017-07-18T18:55:01 | 2017-07-18T18:55:01 | 99,984,036 | 1 | 0 | null | true | 2017-08-11T02:37:40 | 2017-08-11T02:37:40 | 2017-07-12T20:35:47 | 2017-07-18T18:57:01 | 35,094 | 0 | 0 | 0 | null | null | null | '''
Test the correctness of the dev server, which simulates the functions of unrealcv server in python
'''
import threading, time, socket, unittest, logging, sys
if (sys.version_info > (3, 0)):
import socketserver as SocketServer
else:
import SocketServer
from dev_server import MessageServer, MessageTCPHandler
import unrealcv
host = 'localhost'
port = 9001
def test_server():
server = MessageServer((host, port))
server.start()
server.shutdown()
def test_release():
'''
Test whether resources are correctly released
'''
for i in range(10):
server = MessageServer((host, port))
        server.start() # Make sure the port has been released, otherwise this will fail
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
server.shutdown()
def test_client_side_close():
'''
Test whether the server can correctly detect client disconnection
'''
server = MessageServer((host, port))
server.start()
for i in range(10):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
unrealcv.SocketMessage.WrapAndSendPayload(s, 'hello')
s.close() # It will take some time to notify the server
        time.sleep(0.5) # how long the server should take to detect the client-side loss
server.shutdown()
if __name__ == '__main__':
test_server()
test_release()
test_client_side_close()
| UTF-8 | Python | false | false | 1,460 | py | 42 | test_dev_server.py | 22 | 0.664384 | 0.656164 | 0 | 51 | 27.627451 | 98 |
cowcai1988/-Poi- | 2,851,858,305,504 | 38cdd581f1d5dd18c739e4d8b6a4324bf79a08e1 | 50e03c155e196a2d645b7b0d1849e80e495d0a36 | /yongdushuju.py | c4817d8537508cef81f3926491a93310b9607677 | []
| no_license | https://github.com/cowcai1988/-Poi- | 8f36938880ecfbe3c82f30febeb6bf05caaeadb1 | 764b08961a6343b1760a76c6d22da3880fd581d1 | refs/heads/master | 2020-07-26T06:01:26.036224 | 2019-05-20T07:34:37 | 2019-05-20T07:34:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
import time
import pymysql
import threading
from pandas import DataFrame
def getGaodeTrafficStatus(key,furl,currentTime):
insert_list = []
TrafficStatusUrl = furl
res = requests.get(url=TrafficStatusUrl).content
res=res.decode("utf-8")
total_json = json.loads(res)
print(total_json)
jsondata = total_json['trafficinfo']['roads']
currentDate = time.strftime("%Y-%m-%d", time.localtime())
if any(jsondata):
for i in jsondata:
name = i['name']
status = i['status']
direction = i['direction']
            # the 'evaluation' field is a dict of percentage strings such as
            # {'expedite': '60.00%', 'congested': '20.00%', ...}; strip the '%'
            # so the value can be stored as a number (assumption: the
            # 'expedite' share is the figure of interest here)
            evaluation_info = total_json['trafficinfo']['evaluation']
            evaluation = float(str(evaluation_info.get('expedite', '0%')).strip('%'))
angle = i['angle']
speed = i.get('speed')
if speed is None:
speed = None
lcodes = i['lcodes']
polyline = i['polyline']
            row = [name, evaluation, status, direction, angle, lcodes, polyline,
                   currentDate, currentTime, speed]
            insert_list.append(row)
db = pymysql.connect("localhost", "root", "root", "ttt")
cursor = db.cursor()
print(len(insert_list))
    for i in insert_list:
        print(len(i))
        if len(i):
            sql = ("insert into biao(name,evaluation,status,direction,angle,lcodes,polyline, currentDate, currentTime, speed) "
                   "values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % (
                       i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7], i[8], i[9]))
            print(sql)
            print("----------------------------")
            cursor.execute(sql)
db.commit()
db.close()
keyList=[{}]
rectangleList=[]
def pydata():
currentTime = time.strftime("%H:%M:%S", time.localtime())
key='bc411bc209882674362f05b90d6167d1'
    rectangle='112.89173,23.17571;112.90443,23.16561'  # upper-left and lower-right corners of the rectangle, separated by ';'
    # location='112.86461,23.51614'  # center point for a circle search
    # location_arr="[112.86461,23.51614],[112.87079,23.46136],[112.84058,23.41411],[112.94632,23.41159]"
    # radius='5000'  # circle radius
type='rect'
if(type=="rect"):
url='http://restapi.amap.com/v3/traffic/status/rectangle?key='+key+'&rectangle='+rectangle+'&extensions=all'
else:
url='http://restapi.amap.com/v3/traffic/status/circle?key='+key+'&location='+location+'&radius='+radius+'&extensions=all'
getGaodeTrafficStatus(key,url,currentTime);
timer = threading.Timer(5,pydata)
timer.start()
if __name__ == "__main__":
timer = threading.Timer(5,pydata)
timer.start()
| UTF-8 | Python | false | false | 3,165 | py | 2 | yongdushuju.py | 1 | 0.530619 | 0.479962 | 0 | 72 | 41.319444 | 189 |
jiaweiM/PythonNotes | 18,433,999,657,422 | 337516b42f7349c093905f2bff8b8e7aa936b09a | ae6e9d7ab3795d7580ba2d8dfa796e28d94e315c | /src/ml_test/lr_1.py | 388019e9e0b5e24abbde492f5e1b96621fbeaeb4 | []
| no_license | https://github.com/jiaweiM/PythonNotes | 302a3c6d04638628a94a5ccd96b848258c23653c | 66227b740cd05d61d77f1cd57a5f4e39f7c1a26f | refs/heads/master | 2021-09-22T08:47:42.956762 | 2021-09-14T07:44:19 | 2021-09-14T07:44:19 | 246,194,973 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
import tensorflow as tf
california_housing_dataframe = pd.read_csv("../datasets/california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
california_housing_dataframe['median_house_value'] /= 1000.0
print(california_housing_dataframe.describe())
my_feature = california_housing_dataframe[["total_rooms"]]
feature_columns = [tf.feature_column.numeric_column("total_rooms")]
targets = california_housing_dataframe["median_house_value"]
# Use gradient descent as the optimizer for training the model.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0000001)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
# Configure the linear regression model with our feature columns and optimizer.
# Set a learning rate of 0.0000001 for Gradient Descent.
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=feature_columns,
optimizer=my_optimizer
)
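
# Minimal training call (an illustrative sketch, not part of the original
# script; the input_fn below is an assumption based on the TF1 Estimator API):
def my_input_fn():
    features = {"total_rooms": np.array(my_feature["total_rooms"])}
    labels = np.array(targets)
    return tf.data.Dataset.from_tensor_slices((features, labels)).batch(1)

linear_regressor.train(input_fn=my_input_fn, steps=100)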
| UTF-8 | Python | false | false | 1,061 | py | 393 | lr_1.py | 177 | 0.787936 | 0.766258 | 0 | 26 | 39.807692 | 95 |
shahrulkamil98/183DB-Capstone | 9,646,496,590,658 | c0b38865460ad5ef2ab7ab882e12598308b95667 | fca8c4124dfbec8975385df924b010829be38a2e | /Demo/Demo/controllers/computer/computer.py | 94b9bd0f7e1303090aaecbdb95b9a35bd49399e8 | []
| no_license | https://github.com/shahrulkamil98/183DB-Capstone | 37a2b56dfbec50488ba44b7f0e9d2dc666700930 | 4e8dab3e00e55dc8e2dc9242e7c52a4220176b48 | refs/heads/main | 2023-06-01T16:18:50.144327 | 2021-06-12T01:52:30 | 2021-06-12T01:52:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # computer.py
from controller import *
receiverRobot = Robot()
TIME_STEP = int(receiverRobot.getBasicTimeStep())
computer_receiver = receiverRobot.getDevice('computer_receiver')
computer_emitter = receiverRobot.getDevice('computer_emitter')
computer_receiver.enable(TIME_STEP)
t = 0
while(t<1000):
receiverRobot.step(TIME_STEP)
if computer_receiver.getQueueLength() > 0:
A = computer_receiver.getData()
A_decoded = int.from_bytes(A, byteorder='big')
computer_receiver.nextPacket()
t = t+TIME_STEP
# next: broadcast a fixed sequence of values over the emitter
receiverRobot.step(TIME_STEP)
computer_emitter.send(bytes([2]))
computer_emitter.send(bytes([1]))
computer_emitter.send(bytes([4]))
computer_emitter.send(bytes([3]))
while t<10000:
receiverRobot.step(TIME_STEP) | UTF-8 | Python | false | false | 741 | py | 20 | computer.py | 7 | 0.755735 | 0.735493 | 0 | 31 | 22.935484 | 64 |
Rao-Varun/gamerbuddy | 16,630,113,372,195 | cedb0416e5b1158c0ff2d6eed89c619e9972486e | 2dd7a5964e6333217fb816e8164f120d612c6098 | /venv/Lib/site-packages/nltk/test/unit/test_wordnet.py | f83dbcca93d26ed64124b4e9ce8e431b8a1f0b6a | []
| no_license | https://github.com/Rao-Varun/gamerbuddy | ab8cf2ab19f113edc1a3b0626c8645a30b910fcd | 01517ead2685789a4de73a5d696c33c30b2fc6d2 | refs/heads/master | 2020-05-02T00:48:34.195704 | 2019-05-09T00:00:48 | 2019-05-09T00:00:48 | 177,677,039 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | version https://git-lfs.github.com/spec/v1
oid sha256:09c70093103df502db9a37d82de634943533749942d07e818f46a81529abe41f
size 7066
| UTF-8 | Python | false | false | 129 | py | 250 | test_wordnet.py | 228 | 0.883721 | 0.457364 | 0 | 3 | 42 | 75 |
redbrick/rbvm-libvirt | 15,238,544,002,141 | 3b322126145960ab7db87eb15a4c5f2bc7b0df77 | 462b8d696e9e05322ba0b48568768bf1b5850513 | /rbvm/model/database.py | 9f7b31c192687b429965b33908fba44a94616642 | []
| no_license | https://github.com/redbrick/rbvm-libvirt | fbedd656d8a3fbcf84fb20e7ae7286b042a8b69a | bd6f75964fb2f01cb5bb3d6facfc0857e4c3d11c | refs/heads/master | 2021-01-22T06:18:38.589221 | 2017-05-26T19:56:51 | 2017-05-26T19:56:51 | 92,536,318 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import sqlalchemy
import random
import string
import hashlib
import os
import base64
import datetime
import libvirt
from sqlalchemy import Table,Column,MetaData,ForeignKey
from sqlalchemy.schema import Sequence, ForeignKeyConstraint
from sqlalchemy import Integer,String,DateTime,Unicode,SmallInteger,Text,Binary,Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation,backref
from sqlalchemy.ext.associationproxy import association_proxy
import rbvm.lib.sqlalchemy_tool as database
session = None # Initialised at runtime by single-threaded daemons (multi threaded daemons use sqlalchemy_tool)
Base = declarative_base()
class User(Base):
"""
User
"""
# {{{
__tablename__ = 'user_table'
id = Column(Integer,Sequence('user_table_id_seq'),primary_key=True)
username = Column(String(255),unique=True,nullable=False)
salt = Column(String(10),nullable=False)
password = Column(String(255),nullable=False)
email_address = Column(String(255),nullable=False)
domains = relation('Domain', backref='user')
def set_password(self,password_plain):
salt = ''.join(random.Random().sample(string.letters + string.digits,9))
hash = hashlib.sha256()
hash.update(password_plain + salt)
self.password = hash.hexdigest()
self.salt = salt
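
    # verification sketch (not part of the model): recompute the salted hash
    # and compare, e.g.
    #   hashlib.sha256(candidate + user.salt).hexdigest() == user.password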
def __repr__(self):
return "<User('%s')>" % (self.username)
def __init__(self,username,email_address,password_plain=None):
self.username = username
if not password_plain:
password_plain = "".join(random.sample(string.letters + string.digits,8))
self.set_password(password_plain)
self.email_address = email_address
    def has_ability(self, ability_name):
for ability_obj in self.abilities:
if ability_obj.system_name == ability_name:
return True
for group in self.groups:
if group.has_ability(ability_name):
return True
return False
# }}}
user_group = Table('user_group',Base.metadata, # {{{
Column('user_id',Integer,ForeignKey('user_table.id')),
Column('group_id',Integer,ForeignKey('group_table.id'))
) # }}}
class Group(Base):
"""
    User group
"""
# {{{
__tablename__ = 'group_table'
id = Column(Integer,Sequence('group_table_id_seq'),primary_key=True)
name = Column(String(255))
system_name = Column(String(255))
users = relation('User',secondary=user_group,backref='groups')
def __repr__(self):
return "<Group('%s')>" % (self.name)
def __init__(self,name,system_name):
self.name = name
self.system_name = system_name
def has_ability(self, ability_name):
for ability_obj in self.abilities:
if ability_obj.system_name == ability_name:
return True
return False
# }}}
user_ability = Table('user_ability',Base.metadata, # {{{
Column('user_id',Integer,ForeignKey('user_table.id')),
Column('ability_id',Integer,ForeignKey('ability.id'))
) # }}}
group_ability = Table('group_ability',Base.metadata, # {{{
Column('group_id',Integer,ForeignKey('group_table.id')),
Column('ability_id',Integer,ForeignKey('ability.id'))
) # }}}
class Ability(Base):
"""
Abilities, assigned to groups and users
"""
# {{{
__tablename__ = 'ability'
id = Column(Integer,Sequence('ability_id_seq'),primary_key=True)
name = Column(String(255))
system_name = Column(String(255))
users = relation('User',secondary=user_ability,backref="abilities")
groups = relation('Group',secondary=group_ability,backref="abilities")
def __repr__(self):
return "<Ability('%s')>" % self.system_name
def __init__(self, name, system_name):
self.name = name
self.system_name = system_name
# }}}
class Hypervisor(Base):
"""
A hypervisor
"""
__tablename__ = 'hypervisor'
id = Column(Integer,Sequence('hypervisor_id_seq'),primary_key=True)
name = Column(String(255))
uri = Column(String(1024))
domains = relation('Domain',backref='hypervisor')
def __repr__(self):
return "<Hypervisor('%s')>" % self.uri
def __init__(self, name, uri):
self.name = name
self.uri = uri
def connect(self):
return libvirt.open(self.uri)
def list_domains(self):
conn = self.connect()
return [conn.lookupByName(n) for n in conn.listDefinedDomains()] + [conn.lookupByID(i) for i in conn.listDomainsID()]
class Domain(Base):
"""
Maps a domain (by UUID) to a user.
"""
__tablename__ = 'domain'
id = Column(Integer,Sequence('domain_id_seq'),primary_key=True)
uuid = Column(String(36))
user_id = Column(ForeignKey('user_table.id'))
hypervisor_id = Column(ForeignKey('hypervisor.id'))
def __repr__(self):
return "<Domain('%s'>)" % (self.uuid)
def __init__(self, uuid, user, hypervisor):
self.uuid = uuid
self.user_id = user.id
self.hypervisor_id = hypervisor.id
class OneTimeToken(Base):
"""
A token that can be sent to the client (in unreadable form) and sent
back to verify a command's origin.
"""
# {{{
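    # usage flow (illustrative): token = OneTimeToken(user); hand token.token to
    # the client; when it comes back, check_and_expire(user) returns False exactly
    # once within the 15-minute window, and True for reuse or expiry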
__tablename__ = 'one_time_token'
id = Column(Integer,Sequence('one_time_token_id_seq'),primary_key=True)
token = Column(String(255),index=True)
timestamp = Column(DateTime)
used = Column(Boolean)
user_id = Column(ForeignKey('user_table.id'))
def __repr__(self):
return "<OneTimeToken('%s')>" % (self.token)
def __init__(self,user):
assert user is not None
# Generate a random token
self.token = base64.b64encode(os.urandom(200))[:255]
self.timestamp = datetime.datetime.now()
self.used = False
self.user_id = user.id
def check_and_expire(self,user):
"""
Returns whether or not a token has been used before or is invalid,
and marks the token as used.
"""
seconds = 60 * 15
delta = datetime.timedelta(seconds=seconds)
try:
assert user is not None
assert self.user_id == user.id
assert self.used == False and self.timestamp + delta > datetime.datetime.now()
except AssertionError:
return True
self.used = True
if session is None:
database.session.commit()
else:
session.commit()
return False
# }}}
| UTF-8 | Python | false | false | 6,766 | py | 41 | database.py | 35 | 0.602128 | 0.593704 | 0 | 235 | 27.791489 | 125 |
apeckman/PennApps2015 | 13,950,053,825,335 | 4c4ddf8b1ac08434123b1c9d3533572335546caf | 0548becc751743b9243fe99eb6c68ad4b835649e | /bae/stocks/PythonAlg/Ftl.py | 3760d8b1421221bd2355f70124a7e35c71a9fbb8 | []
| no_license | https://github.com/apeckman/PennApps2015 | 0deda7cccc026dffdd699ceef1a61ce76cf90042 | 0f88b64fea91dbbe0c39348424788c435a580e27 | refs/heads/master | 2021-01-15T19:28:00.989846 | 2015-09-06T10:35:37 | 2015-09-06T10:35:37 | 41,937,692 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
class Ftl(object):
exp = []
algWeight = 1.0
name = 'FTL'
def __init__(self):
self.exp = []
def getName(self):
return self.name
def getWeight(self):
return self.algWeight
def setWeight(self,_weight):
self.algWeight = _weight
def add(self, e):
self.exp.append(e)
def ftl(self):
        # follow the leader: return the expert with the lowest cumulative cost
        leader = None
        best_cost = sys.float_info.max
        for e in self.exp:
            if e.cost < best_cost:
                leader = e
                best_cost = e.cost
        return leader
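
    # usage sketch (expert objects are assumed to expose a numeric .cost):
    #   alg = Ftl(); alg.add(expert_a); alg.add(expert_b); leader = alg.ftl()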
| UTF-8 | Python | false | false | 447 | py | 31 | Ftl.py | 20 | 0.61745 | 0.612975 | 0 | 30 | 13.8 | 29 |
PyBeaner/IntroductionToAlgorithms | 8,040,178,818,791 | e8a4c70ba654c1010aca17e540592f948fcf3d98 | 896c93a239de381c40151832b4d07b538bc86d63 | /第一部分 基础知识/第四章 分治策略/4.1 最大子数组/max-subarray-brute-force.py | a965a79bc5c65d91403dffc613d7666f5041c677 | []
| no_license | https://github.com/PyBeaner/IntroductionToAlgorithms | 59b26fd8ab25202eaeca0745e5f4be3bbadb89aa | 561b0d82540769a1cc787226b44e9c2e44d28925 | refs/heads/master | 2020-05-18T10:22:32.577205 | 2015-07-30T15:53:24 | 2015-07-30T15:53:24 | 38,962,003 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'PyBeaner'
threshold = -10**10  # sentinel lower than any achievable subarray sum
# O(n^2)
def find_max_subarray(alist):
length = len(alist)
max_sum = threshold
left = right = 0
for i in range(length):
left_in_loop = right_in_loop = i
sum_in_loop = 0
max_sum_in_loop = threshold
for j in range(i, length):
sum_in_loop += alist[j]
if sum_in_loop > max_sum_in_loop:
max_sum_in_loop = sum_in_loop
right_in_loop = j
if max_sum_in_loop > max_sum:
max_sum = max_sum_in_loop
left = left_in_loop
right = right_in_loop
return left, right, max_sum
if __name__ == '__main__':
from random import sample
# alist = sample(range(-10, 0), 10)
alist = sample(range(-10, 10), 10)
print(alist)
result = find_max_subarray(alist)
print(result)
| UTF-8 | Python | false | false | 866 | py | 29 | max-subarray-brute-force.py | 29 | 0.533487 | 0.512702 | 0 | 33 | 25.242424 | 45 |
gitddabong/acmicpc | 1,640,677,544,517 | 1ebc639459bd062bb69aeec921d5636e0ad2967a | a7ebd5ac27af32faa19ea84b9a2b64abdf70ae22 | /2751.py | e79bc9206329f8ed31d0d1944970211b2ff69d96 | []
| no_license | https://github.com/gitddabong/acmicpc | b1c6e162ed1954e889945622cea02e485dd6b9cc | 433f5be41f29757f4dc9f6ff502950786714566a | refs/heads/master | 2023-07-29T04:25:20.933055 | 2021-09-06T12:03:25 | 2021-09-06T12:03:25 | 394,655,049 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
n = int(sys.stdin.readline()) # Java-style (buffered) input
array = [0] * n
for i in range(n):
array[i] = int(sys.stdin.readline())
for i in sorted(array):
    sys.stdout.write(str(i) + '\n') # Java-style (buffered) output
drkovalskyi/Bmm5 | 12,644,383,766,916 | 28d98425583aa0915a79a1242f0406fe8d24aef0 | 274169d01aaeb3704aee765135d84c9f28349787 | /NanoAOD/validation/fake_muon_sources.py | 760ef60ae328c6cb65582eb602d3c23a4c80b593 | []
| no_license | https://github.com/drkovalskyi/Bmm5 | 891acb1ac50b0ad3061cf44a72a8aab01c34449d | c56b03c919a9827ca99baa46dc8a8b7b28aaf924 | refs/heads/master | 2023-08-31T11:24:55.148995 | 2023-08-29T12:35:40 | 2023-08-29T12:35:40 | 213,879,062 | 1 | 6 | null | false | 2021-01-08T15:30:01 | 2019-10-09T09:41:48 | 2021-01-05T14:08:25 | 2021-01-05T14:08:32 | 36,062 | 1 | 2 | 4 | Python | false | false | import ROOT
import sys, os, subprocess
from DataFormats.FWLite import Events, Handle
from ROOT import TFile,TTree,TH1,TROOT,TDirectory,TPad,TCanvas,TColor
from math import *
# output_path = "/afs/cern.ch/user/d/dmytro/www/public_html/plots/bmm5_NanoAODv6-508/muon_fake_sources_bhh_medium"
# output_path = "/afs/cern.ch/user/d/dmytro/www/public_html/plots/bmm5_NanoAODv6-508/muon_fake_sources_bhh_loose"
output_path = "/afs/cern.ch/user/d/dmytro/www/public_html/plots/bmm5_NanoAODv6-508/muon_fake_sources_bhh_mva"
# output_path = "/afs/cern.ch/user/d/dmytro/www/public_html/plots/bmm5_NanoAODv6-508/muon_fake_sources_mu_enriched_loose"
dump_info = False
min_pt = 4
# muon_id = None
muon_id = "SoftMvaId"
# muon_id = "MediumId"
files = []
# find files
path = "/eos/cms/store/user/dmytro/"
pds = [
# 'JpsiToMuMu_JpsiPt8_TuneCP5_13TeV-pythia8'
# MuEnriched - lots of data
# 'QCD_Pt-20to30_MuEnrichedPt5_TuneCP5_13TeV_pythia8/crab_MuonFakeSkim_QCD_Pt-20to30_MuEnrichedPt5_TuneCP5_13TeV_pythia8_1603447990',
# 'QCD_Pt-30to50_MuEnrichedPt5_TuneCP5_13TeV_pythia8/crab_MuonFakeSkim_QCD_Pt-30to50_MuEnrichedPt5_TuneCP5_13TeV_pythia8_1603446796',
# 'QCD_Pt-50to80_MuEnrichedPt5_TuneCP5_13TeV_pythia8/crab_MuonFakeSkim_QCD_Pt-50to80_MuEnrichedPt5_TuneCP5_13TeV_pythia8_1603448253',
# 'QCD_Pt-80to120_MuEnrichedPt5_TuneCP5_13TeV_pythia8/crab_MuonFakeSkim_QCD_Pt-80to120_MuEnrichedPt5_TuneCP5_13TeV_pythia8_1603448316',
# 'QCD_Pt-120to170_MuEnrichedPt5_TuneCP5_13TeV_pythia8/crab_MuonFakeSkim_QCD_Pt-120to170_MuEnrichedPt5_TuneCP5_13TeV_pythia8_1603448633'
# # Bhh
'BdToKK_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/crab_MuonFakeSkim_BdToKK_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen_1603710814',
'BdToKPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/crab_MuonFakeSkim_BdToKPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen_1603711796',
'BdToPiPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/crab_MuonFakeSkim_BdToPiPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen_1603711442',
'BsToKK_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/crab_MuonFakeSkim_BsToKK_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen_1603711503',
'BsToKPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/crab_MuonFakeSkim_BsToKPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen_1603731003',
'LambdaBToPK_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/crab_MuonFakeSkim_LambdaBToPK_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen_1603711266',
'LambdaBToPPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/crab_MuonFakeSkim_LambdaBToPPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen_1603710918'
]
for pd in pds:
for f in subprocess.check_output("find %s/%s/ -type f -name '*.root'|grep crab_MuonFakeSkim" % (path, pd), shell=True).split("\n"):
if f != "":
files.append(f)
# break # use just the first file
print "Number of files: %u" % len(files)
events = Events(files)
# events = Events (
# [
# # '/afs/cern.ch/work/d/dmytro/projects/RunII-NanoAODv6/src/Bmm5/NanoAOD/test/muon_fake_skim.root'
# '/afs/cern.ch/work/d/dmytro/projects/RunII-NanoAODv6/src/Bmm5/NanoAOD/test/BdToKPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen_RunIIAutumn18MiniAOD_muon_fake_skim.root',
# '/afs/cern.ch/work/d/dmytro/projects/RunII-NanoAODv6/src/Bmm5/NanoAOD/test/LambdaBToPPi_BMuonFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen+RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2+MINIAODSIM.root'
# # '/afs/cern.ch/work/d/dmytro/projects/RunII-NanoAODv6/src/Bmm5/NanoAOD/test/test.root'
# ]
# )
def isAncestor(a,p) :
if a == p :
return True
for i in xrange(0,p.numberOfMothers()) :
if isAncestor(a,p.mother(i)) :
return True
return False
def find_parent(cand):
# look for a parent with a different PDG id
parent = cand
# do a fixed depth loop to avoid endless loops
for i in range(100):
parent = parent.mother()
if not parent: break
if parent.pdgId()!=cand.pdgId():
return parent
return None
def isGoodMuon(muon):
if not muon.isTrackerMuon(): return False
if not muon.isGlobalMuon(): return False
if not muon.isLooseMuon(): return False
if not muon.innerTrack().quality(ROOT.reco.Track.highPurity): return False
if muon.pt() < min_pt or abs(muon.eta()) > 1.4: return False
if muon_id:
if muon_id == "SoftMvaId":
if not muon.passed(ROOT.reco.Muon.SoftMvaId): return False
elif muon_id == "MediumId":
if not muon.passed(ROOT.reco.Muon.CutBasedIdMedium): return False
else:
raise Exception("Uknown muon_id: %s" % muon_id)
return True
def deltaPhi(phi1,phi2):
return acos(cos(phi2-phi1))
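
# worked example: deltaPhi(0.1, 6.2) is ~0.18 rather than 6.1, because
# acos(cos(.)) folds the difference into [0, pi]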
def deltaR(p1,p2):
return sqrt(pow(deltaPhi(p1.phi(),p2.phi()),2)+pow(p2.eta()-p1.eta(),2))
def print_canvas(output_name_without_extention, path, canvas=ROOT.gPad):
if not os.path.exists(path):
os.makedirs(path)
canvas.Print("%s/%s.png"%(path,output_name_without_extention))
canvas.Print("%s/%s.pdf"%(path,output_name_without_extention))
canvas.Print("%s/%s.root"%(path,output_name_without_extention))
canvas.Print("%s/%s.C"%(path,output_name_without_extention))
fake_types = {
211:'pion',
321:'kaon',
2212:'proton'
}
def get_sim_type(muon):
if muon.simPdgId() == 0: return 'not_matched'
if abs(muon.simPdgId()) == 13:
if abs(muon.simMotherPdgId()) == 211: return 'pion'
if abs(muon.simMotherPdgId()) == 321: return 'kaon'
if abs(muon.simMotherPdgId()) == 2212: return 'proton'
return 'other'
sim_type_to_value = {
'not_matched':0.5,
'pion':1.5,
'kaon':2.5,
'proton':3.5,
'other':4.5
}
ROOT.gROOT.SetBatch(True)
handlePruned = Handle ("std::vector<reco::GenParticle>")
handlePacked = Handle ("std::vector<pat::PackedGenParticle>")
labelPruned = ("prunedGenParticles")
labelPacked = ("packedGenParticles")
muonHandle, muonLabel = Handle("std::vector<pat::Muon>"),"slimmedMuons"
h_sim_match = dict()
h_genid_not_matched = dict()
h_sim_relative_pt = dict()
h_sim_relative_pt_type_matched = dict()
h_sim_relative_pt_wrong_type_matched = dict()
h_sim_relative_pt_matched_to_other = dict()
h_sim_decay_rho = dict()
h_sim_decay_rho_type_matched = dict()
h_sim_decay_rho_wrong_type_matched = dict()
h_sim_decay_rho_matched_to_other = dict()
h_sim_decay_rho_matched_to_other_same_pt = dict()
for id,name in fake_types.items():
h_sim_match[name] = ROOT.TH1D("h_sim_match_%s" % name,
"Muon matching based on simulated hits", 5, 0, 5)
h_sim_match[name].GetXaxis().SetBinLabel(1, "No Match")
h_sim_match[name].GetXaxis().SetBinLabel(2, "Muon from Pion")
h_sim_match[name].GetXaxis().SetBinLabel(3, "Muon from Kaon")
h_sim_match[name].GetXaxis().SetBinLabel(4, "Muon from Proton")
h_sim_match[name].GetXaxis().SetBinLabel(5, "Other")
h_sim_match[name].SetFillColor(ROOT.kMagenta)
h_genid_not_matched[name] = ROOT.TH1D("h_genid_not_matched_%s" % name,
"Gen |pdgId| for muons not matched by simulated hits", 350, 0, 350)
h_genid_not_matched[name].SetLineColor(ROOT.kBlue)
h_genid_not_matched[name].SetLineWidth(2)
h_genid_not_matched[name].GetXaxis().SetTitle("|pdgId|")
h_sim_relative_pt[name] = ROOT.TH1D("h_sim_relative_pt_%s" % name,
"Relative Pt of sim-matched particle", 100, 0, 2)
h_sim_relative_pt[name].SetLineColor(ROOT.kBlue)
h_sim_relative_pt[name].SetLineWidth(2)
h_sim_relative_pt[name].GetXaxis().SetTitle("Pt_{sim}/Pt_{reco}")
h_sim_relative_pt_type_matched[name] = ROOT.TH1D("h_sim_relative_pt_type_matched_%s" % name,
"Relative Pt of sim particle (type matched)", 100, 0, 2)
h_sim_relative_pt_type_matched[name].SetLineColor(ROOT.kBlue)
h_sim_relative_pt_type_matched[name].SetLineWidth(2)
h_sim_relative_pt_type_matched[name].GetXaxis().SetTitle("Pt_{sim}/Pt_{reco}")
h_sim_relative_pt_wrong_type_matched[name] = ROOT.TH1D("h_sim_relative_pt_wrong_type_matched_%s" % name,
"Relative Pt of sim particle (wrong match)", 100, 0, 2)
h_sim_relative_pt_wrong_type_matched[name].SetLineColor(ROOT.kBlue)
h_sim_relative_pt_wrong_type_matched[name].SetLineWidth(2)
h_sim_relative_pt_wrong_type_matched[name].GetXaxis().SetTitle("Pt_{sim}/Pt_{reco}")
h_sim_relative_pt_matched_to_other[name] = ROOT.TH1D("h_sim_relative_pt_matched_to_other_%s" % name,
"Relative Pt of sim particle (other match)", 100, 0, 2)
h_sim_relative_pt_matched_to_other[name].SetLineColor(ROOT.kBlue)
h_sim_relative_pt_matched_to_other[name].SetLineWidth(2)
h_sim_relative_pt_matched_to_other[name].GetXaxis().SetTitle("Pt_{sim}/Pt_{reco}")
h_sim_decay_rho[name] = ROOT.TH1D("h_sim_decay_rho_%s" % name,
"Decay radius", 100, 0, 400)
h_sim_decay_rho[name].SetLineColor(ROOT.kBlue)
h_sim_decay_rho[name].SetLineWidth(2)
h_sim_decay_rho[name].GetXaxis().SetTitle("#rho")
h_sim_decay_rho_type_matched[name] = ROOT.TH1D("h_sim_decay_rho_type_matched_%s" % name,
"Decay radius for type matched", 100, 0, 400)
h_sim_decay_rho_type_matched[name].SetLineColor(ROOT.kBlue)
h_sim_decay_rho_type_matched[name].SetLineWidth(2)
h_sim_decay_rho_type_matched[name].GetXaxis().SetTitle("#rho")
h_sim_decay_rho_wrong_type_matched[name] = ROOT.TH1D("h_sim_decay_rho_wrong_type_matched_%s" % name,
"Decay radius for matched to wrong type", 100, 0, 400)
h_sim_decay_rho_wrong_type_matched[name].SetLineColor(ROOT.kBlue)
h_sim_decay_rho_wrong_type_matched[name].SetLineWidth(2)
h_sim_decay_rho_wrong_type_matched[name].GetXaxis().SetTitle("#rho")
h_sim_decay_rho_matched_to_other[name] = ROOT.TH1D("h_sim_decay_rho_matched_to_other_%s" % name,
"Decay radius for matched to other type", 100, 0, 400)
h_sim_decay_rho_matched_to_other[name].SetLineColor(ROOT.kBlue)
h_sim_decay_rho_matched_to_other[name].SetLineWidth(2)
h_sim_decay_rho_matched_to_other[name].GetXaxis().SetTitle("#rho")
h_sim_decay_rho_matched_to_other_same_pt[name] = ROOT.TH1D("h_sim_decay_rho_matched_to_other_same_pt_%s" % name,
"Decay radius for matched to 'other' type with |Pt_sim/Pt_reco-1|<0.05", 100, 0, 400)
h_sim_decay_rho_matched_to_other_same_pt[name].SetLineColor(ROOT.kBlue)
h_sim_decay_rho_matched_to_other_same_pt[name].SetLineWidth(2)
h_sim_decay_rho_matched_to_other_same_pt[name].GetXaxis().SetTitle("#rho")
# loop over events
count= 0
for event in events:
# muons
event.getByLabel(muonLabel, muonHandle)
muons = muonHandle.product()
has_interesting_muon = False
for muon in muons:
if not isGoodMuon(muon): continue
has_interesting_muon = True
if not has_interesting_muon: continue
# check if we have a match
event.getByLabel (labelPacked, handlePacked)
event.getByLabel (labelPruned, handlePruned)
# get the product
packed = handlePacked.product()
pruned = handlePruned.product()
interesting_event = False
for p in pruned:
if abs(p.pdgId()) not in (321, 211, 2212): continue
fake_type = fake_types[abs(p.pdgId())]
# mother = find_parent(p)
# if not mother or abs(mother.pdgId()) != 511: continue
muon = None
for m in muons:
if deltaR(m,p)>0.1: continue
if abs(m.pt()/p.pt()-1)>0.02: continue
if not isGoodMuon(m): continue
muon = m
break
if not muon: continue
h_sim_match[fake_type].Fill(sim_type_to_value[get_sim_type(muon)])
if muon.simPdgId()!=0:
h_sim_relative_pt[fake_type].Fill(min(muon.simPt()/p.pt(), 1.999))
h_sim_decay_rho[fake_type].Fill(min(muon.simProdRho(), 399.999))
if fake_type == get_sim_type(muon):
h_sim_relative_pt_type_matched[fake_type].Fill(min(muon.simPt()/p.pt(), 1.999))
h_sim_decay_rho_type_matched[fake_type].Fill(min(muon.simProdRho(), 399.999))
else:
h_sim_relative_pt_wrong_type_matched[fake_type].Fill(min(muon.simPt()/p.pt(), 1.999))
h_sim_decay_rho_wrong_type_matched[fake_type].Fill(min(muon.simProdRho(), 399.999))
if get_sim_type(muon) == 'other':
h_sim_relative_pt_matched_to_other[fake_type].Fill(min(muon.simPt()/p.pt(), 1.999))
h_sim_decay_rho_matched_to_other[fake_type].Fill(min(muon.simProdRho(), 399.999))
if abs(muon.simPt()/p.pt() - 1) < 0.05:
h_sim_decay_rho_matched_to_other_same_pt[fake_type].Fill(min(muon.simProdRho(), 399.999))
else:
if muon.genParticle():
h_genid_not_matched[fake_type].Fill(min(abs(muon.genParticle().pdgId()),399.999))
else:
h_genid_not_matched[fake_type].Fill(0)
interesting_event = True
status = p.statusFlags().fromHardProcessBeforeFSR()
if dump_info:
print "Reco Muon pt : %s eta : %s phi: %s" % (muon.pt(), muon.eta(), muon.phi())
print "\tsim_pt: %s sim_pid: %s sim_mother_pid: %s sim_prod_rho: %f sim_prod_z: %f" % (muon.simPt(), muon.simPdgId(), muon.simMotherPdgId(),
muon.simProdRho(), muon.simProdZ())
print "Gen pdgId : %s pt : %s eta : %s phi : %s status: %d" %(p.pdgId(), p.pt(), p.eta(), p.phi(), status)
mother = find_parent(p)
if mother:
status = mother.statusFlags().fromHardProcessBeforeFSR()
print "\tMother PdgId : %s pt : %s eta : %s phi : %s status: %d" %(mother.pdgId(),mother.pt(),mother.eta(),mother.phi(),status)
mother = find_parent(mother)
if mother:
status = mother.statusFlags().fromHardProcessBeforeFSR()
print "\tGrand Mother PdgId : %s pt : %s eta : %s phi : %s status: %d" %(mother.pdgId(),mother.pt(),mother.eta(),mother.phi(),status)
if not interesting_event: continue
count += 1
# https://github.com/cms-sw/cmssw/blob/CMSSW_7_4_X/DataFormats/HepMCCandidate/interface/GenStatusFlags.h
if dump_info:
print "Event dump"
print "Run: %u, Event: %u" % (event.eventAuxiliary().run(),event.eventAuxiliary().event())
for p in pruned :
if not abs(p.pdgId()) in [511,521,531]: continue
final_b = True
for dau in p.daughterRefVector():
if dau.pdgId() == -p.pdgId():
final_b = False
break
if not final_b: continue
signature = 1
for pa in packed:
mother = pa.mother(0)
if mother and isAncestor(p,mother) :
if pa.pdgId()!=22: signature *= pa.pdgId()
# if not signature in [13*13*321*321, 13*13, 13*13*321, -13*13*321]: continue
# d_p4 = ROOT.Math.LorentzVector(ROOT.Math.PxPyPzE4D(ROOT.Double))()
d_p4 = ROOT.reco.Candidate.LorentzVector()
rad_p4 = ROOT.reco.Candidate.LorentzVector()
status = p.statusFlags().fromHardProcessBeforeFSR()
print " PdgId : %s pt : %s eta : %s phi : %s status: %d" %(p.pdgId(),p.pt(),p.eta(),p.phi(),status)
for dau in p.daughterRefVector():
status = dau.statusFlags().fromHardProcessBeforeFSR()
print " dau PdgId : %s pt : %s eta : %s phi : %s status: %d" %(dau.pdgId(),dau.pt(),dau.eta(),dau.phi(),status)
if p.mother():
status = p.mother().statusFlags().fromHardProcessBeforeFSR()
print " mother PdgId : %s pt : %s eta : %s phi : %s status: %d" %(p.mother().pdgId(),p.mother().pt(),p.mother().eta(),p.mother().phi(),status)
for pa in packed:
mother = pa.mother(0)
if mother and isAncestor(p,mother) :
print " PdgId : %s pt : %s eta : %s phi : %s" %(pa.pdgId(),pa.pt(),pa.eta(),pa.phi())
d_p4 += pa.p4()
if pa.pdgId()==22: rad_p4 += pa.p4()
print " delta: %0.5f%%" % (100.*(p.p4()-d_p4).P()/p.p4().P())
print " radiation: %0.2f%%" % (100.*rad_p4.P()/p.p4().P())
if dump_info and count >= 100:
sys.exit()
c1 = TCanvas("c1", "c1", 800, 800)
for id,name in fake_types.items():
h_sim_match[name].Draw()
print_canvas("sim_match_%s" % name, output_path)
h_genid_not_matched[name].Draw()
print_canvas("h_genid_not_matched_%s" % name, output_path)
h_sim_relative_pt[name].Draw()
print_canvas("sim_relative_pt_%s" % name, output_path)
h_sim_relative_pt_type_matched[name].Draw()
print_canvas("sim_relative_pt_type_matched_%s" % name, output_path)
h_sim_relative_pt_wrong_type_matched[name].Draw()
print_canvas("sim_relative_pt_wrong_type_matched_%s" % name, output_path)
h_sim_relative_pt_matched_to_other[name].Draw()
print_canvas("sim_relative_pt_matched_to_other_%s" % name, output_path)
h_sim_decay_rho[name].Draw()
print_canvas("sim_decay_rho_%s" % name, output_path)
h_sim_decay_rho_type_matched[name].Draw()
print_canvas("h_sim_decay_rho_type_matched_%s" % name, output_path)
h_sim_decay_rho_wrong_type_matched[name].Draw()
print_canvas("h_sim_decay_rho_wrong_type_matched_%s" % name, output_path)
h_sim_decay_rho_matched_to_other[name].Draw()
print_canvas("h_sim_decay_rho_matched_to_other_%s" % name, output_path)
h_sim_decay_rho_matched_to_other_same_pt[name].Draw()
print_canvas("h_sim_decay_rho_matched_to_other_same_pt_%s" % name, output_path)
# Local Variables:
# indent-tabs-mode: 1
# tab-width: 4
# python-indent: 4
# End:
| UTF-8 | Python | false | false | 16,681 | py | 117 | fake_muon_sources.py | 98 | 0.693903 | 0.655656 | 0 | 366 | 44.576503 | 216 |
cfranklin11/budget-buddy | 4,423,816,356,378 | dd7e2b4be78e42dcc63909ffaaff553f974e837c | e1686543c1cc1a038fb8a0f08696f45cd0522f0c | /data/scripts/data_processor.py | b39f751a62bc625cfa052a393fa2c62b25769470 | [
"MIT"
]
| permissive | https://github.com/cfranklin11/budget-buddy | b55bf5d1a89d8e5e5f7bb1d2e942fb70b2c871cb | dc2e05dd8012d3e3b8a011cef9bc65a912092bab | refs/heads/master | 2021-01-20T12:24:53.259410 | 2017-06-04T09:56:31 | 2017-06-04T09:56:31 | 90,361,863 | 0 | 0 | null | false | 2017-06-04T09:56:31 | 2017-05-05T09:50:20 | 2017-05-27T11:21:39 | 2017-06-04T09:56:31 | 4,572 | 0 | 0 | 0 | Jupyter Notebook | null | null | import sys
import os
import pandas as pd
import json
pd.options.mode.chained_assignment = None
def list_depts():
data = get_data()
depts = pd.unique(data['department_name'])
return depts.tolist()
def clean_data(department):
data = get_data()
dept_data = data[data['department_name'].str.lower() == department.lower()]
dept_data.loc[:, 'measure_target'] = pd.to_numeric(dept_data['measure_target'], errors='coerce')
dept_data['estimate_or_actual'] = pd.to_numeric(dept_data['estimate_or_actual'], errors='coerce').fillna(0)
df = dept_data[['program_name', 'year', 'deliverable', 'estimate_or_actual']].copy()
toc = df[df['deliverable'] == 'Total Output Cost'].rename(columns = {'estimate_or_actual':'total_output_cost'})[['program_name', 'year', 'total_output_cost']]
df2 = dept_data.copy()
df3 = df.merge(toc)
df4 = df[df['deliverable'] != 'Total Output Cost']
return df4.to_dict('records')
def get_data():
file_path = os.path.abspath(os.path.join(os.getcwd(), 'data/cleaned_data.csv'))
data = pd.read_csv(file_path)
return data
def main(args):
if len(args) > 1 and type(args[1]) == str:
return clean_data(args[1])
else:
return list_depts()
print(json.dumps(main(sys.argv)))
| UTF-8 | Python | false | false | 1,273 | py | 57 | data_processor.py | 17 | 0.64729 | 0.641005 | 0 | 40 | 30.825 | 162 |
ComboZhc/AsciiArt | 292,057,800,320 | 486ae2887383657e5cedeb17a42b99a18454cb0f | 3477bce623936d9cddbebdb7e4bb58e9c0a5199b | /ascii_art_painter.py | ad6c34390db69cca6143b51639fd7354ccc48877 | []
| no_license | https://github.com/ComboZhc/AsciiArt | bdb89f103929a738917784aadd61d5320b89d567 | 27e0feadb4be5bb35b92695f2b1e265916def409 | refs/heads/master | 2016-09-06T18:30:45.905309 | 2012-09-01T08:50:28 | 2012-09-01T08:50:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import Image, ImageFont, ImageDraw
import tkFileDialog
import sys
import itertools
import math
import numpy
src = tkFileDialog.askopenfilename()
if not src.strip():
print 'no file'
sys.exit()
ranks = []
f = open('result.txt', 'r')
for l in f:
a, b = l.split(',')
ranks.append((int(a), float(b)))
print ranks
font = ImageFont.truetype('DejaVuSansMono.ttf', 32)
X_STEP = 1
Y_STEP = 2
W_FONT = 20
H_FONT = 40
try:
i = Image.open(src)
src_pix = i.load()
src_x, src_y = i.size
if src_x % X_STEP > 0: src_x -= src_x % X_STEP
if src_y % Y_STEP > 0: src_y -= src_y % Y_STEP
dest = Image.new('RGB', (src_x * W_FONT / X_STEP, src_y * H_FONT / Y_STEP), (255, 255, 255))
dest_d = ImageDraw.Draw(dest)
#np_pix = numpy.array(src_pix)
for x in range(0, src_x, X_STEP):
for y in range(0, src_y, Y_STEP):
rgb = [0.0, 0.0, 0.0]
for r in range(0, 3):
for ii in range(x, x + X_STEP):
for jj in range(y, y + Y_STEP):
rgb[r] += src_pix[ii, jj][r]
rgb = map(lambda a: a / X_STEP / Y_STEP, rgb)
gray = math.pow((math.pow(rgb[0], 2.2) * 0.2973 + math.pow(rgb[1], 2.2) * 0.6274 + math.pow(rgb[2], 2.2) * 0.0753), 1 / 2.2)
dst_x = x * W_FONT / X_STEP
dst_y = y * H_FONT / Y_STEP
rank = int(gray / 256 * len(ranks))
s = str(chr(ranks[rank][0]))
dest_d.text((dst_x, dst_y), s, font=font, fill='black')
dest.save('result.jpg')
except IOError, e:
print e | UTF-8 | Python | false | false | 1,444 | py | 4 | ascii_art_painter.py | 2 | 0.569945 | 0.528393 | 0 | 55 | 24.290909 | 127 |
DrNolanXW/NC-Online | 3,238,405,383,923 | 267026424ba40ae2495e8efd2871b5ae50b78644 | 5ae475f0cb6c8f3db0fc4f01dfaeb7e25fdf9658 | /NCxJLu/apps/utils/__init__.py | 917e6dca2c79c1eb48a4c4d4a6be2bf7d2782d0a | []
| no_license | https://github.com/DrNolanXW/NC-Online | fff49dcec96869163ebc7552d44d8aeab2675670 | b39407e0daeac569cb78e6391513faf5c263f999 | refs/heads/master | 2020-05-27T21:20:25.058696 | 2017-03-29T09:28:49 | 2017-03-29T09:28:49 | 83,666,438 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'wx'
__date__ = '17-2-10 下午 4:54' | UTF-8 | Python | false | false | 74 | py | 25 | __init__.py | 23 | 0.457143 | 0.328571 | 0 | 3 | 22.666667 | 28 |
lanl/OPPPY | 352,187,359,517 | 8262c3f8fd9a1fe032099c4625fd7720048a4684 | d2435243de0b83b922fbc6f527517b932328fdac | /tests/my_test_opppy_dump_parser.py | 6de2207f2999d35f35e867515fa34abfc0787d41 | [
"BSD-3-Clause"
]
| permissive | https://github.com/lanl/OPPPY | 288be84f43a051ad90a8a1756b10d89c57b624a4 | c3f07c93da6969a01cda7f78d48d75af98765960 | refs/heads/master | 2023-04-02T16:06:25.010458 | 2023-03-22T16:08:25 | 2023-03-22T16:08:25 | 202,370,014 | 3 | 5 | NOASSERTION | false | 2023-03-22T18:04:00 | 2019-08-14T14:44:21 | 2023-02-02T21:10:04 | 2023-03-22T18:03:59 | 6,750 | 3 | 5 | 4 | Python | false | false | # a simple example opppy dump parser
import sys
import re
from numpy import *
sys.path.append('..')
from opppy.plotting_help import *
class my_test_opppy_dump_parser():
'''
An example OPPPY dump parser that can extract data from a simple ASCII dump file
'''
def __init__(self):
# initialize the cycle parsing data
print("Initializing my_test_opppy_dump_parser")
def build_data_dictionary(self, filename, dump_keys=None):
dump_file = open(filename,'r')
lines = dump_file.readlines()
data = {}
keys = []
if dump_keys:
keys = dump_keys
else:
for line in lines:
if "keys" in line and not dump_keys:
keys = line.strip('\n').split(' ')[1:]
break
data = {}
for line in lines:
for key in keys:
if key in line and len(key) is len(line.split(':')[0]):
data[key] = array(str_vector_to_float_vector(line.strip('\n').split(' ')[1:]))
# build xy_verts for 2d mesh plotting example
if 'x' in data and 'y' in data:
xy_verts = []
for x, y in zip(data['x'], data['y']):
xy_verts.append([[x-0.5,y-0.5],[x+0.5,y-0.5],[x+0.5,y+0.5],[x-0.5,y+0.5]])
data['xy_verts'] = xy_verts
return data
| UTF-8 | Python | false | false | 1,388 | py | 58 | my_test_opppy_dump_parser.py | 26 | 0.519452 | 0.505043 | 0 | 44 | 30.318182 | 98 |
okij98/Library | 10,230,612,122,375 | c000ec43b8b99b62677431b8568a25ef3cbbee1f | 0c6d0955c2c52f311c8e6d957258fb9ba9489ca8 | /handlers/root.py | 3222187d9e01d61d923a639b9ca84f6a4a127610 | []
| no_license | https://github.com/okij98/Library | d8af89e3bf1ca9c1bccc4261d2f59b348d3d4859 | 64d3e90f8082a8c7f5bd4863fe426f4997af6c66 | refs/heads/master | 2017-12-03T07:27:01.649134 | 2016-06-01T03:10:30 | 2016-06-01T03:10:30 | 60,082,688 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import tornado.web
import methods.readdb as mrd
class UserHandler(tornado.web.RequestHandler):
def get(self):
username = self.get_argument("user")
user_infos = mrd.select_table(table="users",column="*",condition="username",value=username)
if user_infos[0][1] == 'root':
x = 1
else:
x = 0
self.render("root.html", users = user_infos, x = x)
class AddUser(tornado.web.RequestHandler):
def get(self):
user_infos = mrd.select_columns(table="users", column="username")
self.render("addUser.html", users = user_infos)
def post(self, *args, **kwargs):
username = self.get_argument("username")
password = self.get_argument("password")
if mrd.add_user(table="users",username=username,password=password):
self.write("add success!")
else:
self.write("error")
class DeleteUser(tornado.web.RequestHandler):
def get(self):
user_infos = mrd.select_columns(table="users",column="username")
self.render("deleteUser.html", users = user_infos)
def post(self, *args, **kwargs):
username = self.get_argument("username")
if mrd.delete_user(table="users",username=username):
self.write("delete success!")
else:
self.write("error!") | UTF-8 | Python | false | false | 1,349 | py | 11 | root.py | 6 | 0.613788 | 0.610082 | 0 | 39 | 33.615385 | 99 |
paulohrpinheiro/flask-esqueletico | 4,724,464,076,093 | 400d270165ea8bcbf6db8f2a40586d9283e0b6a3 | 8e7809d38c78493a36a2ab54ebab95b540937da2 | /app/__init__.py | b75db0bbf85873f0c0328055f2b4f75d9be42659 | []
| no_license | https://github.com/paulohrpinheiro/flask-esqueletico | 169b2794075d9fb8f149d7e4875bf22db77a2b2a | 4d8454c14107ea5bee396c7217ce053987248e92 | refs/heads/master | 2021-01-12T12:06:48.929324 | 2016-10-29T18:45:05 | 2016-10-29T18:45:05 | 72,302,855 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
app.config.from_object('config')
from app.mod_root.controllers import mod_root as root_module
app.register_blueprint(root_module)
application = app
| UTF-8 | Python | false | false | 214 | py | 5 | __init__.py | 3 | 0.771028 | 0.771028 | 0 | 9 | 22.777778 | 60 |
TECH-IN-EYE/Speech-to-text-data_collection | 3,178,275,823,237 | 2f6faff143398d4b9e5870329bd9c2683c9d914f | af40414e861a35de3eda304c46a2ebac42e49f3e | /scripts/sendtext.py | 5e553aac11e757c7dab1692db02b81d7b973e31d | []
| no_license | https://github.com/TECH-IN-EYE/Speech-to-text-data_collection | 147ec806c929cf0ab8191aa26cb90c1989e769c1 | 7bba5778593e89548a8538905f0239bb782aee57 | refs/heads/main | 2023-08-07T19:24:03.939888 | 2021-09-15T07:02:17 | 2021-09-15T07:02:17 | 408,710,261 | 0 | 1 | null | true | 2021-09-21T06:24:08 | 2021-09-21T06:24:08 | 2021-09-16T00:21:54 | 2021-09-15T11:09:19 | 40,403 | 0 | 0 | 0 | null | false | false | import pyspark
import json
from pyspark.sql import SparkSession
from kafka import KafkaProducer
from kafka.errors import KafkaError
#os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages com.amazonaws:aws-java-sdk-pom:1.11.538,org.apache.hadoop:hadoop-aws:2.7.3 pyspark-shell'
spark = SparkSession.builder.appName('testspark').getOrCreate()
producer = KafkaProducer(bootstrap_servers=["b-1.demo-cluster-1.9q7lp7.c1.kafka.eu-west-1.amazonaws.com:9092",
"b-2.demo-cluster-1.9q7lp7.c1.kafka.eu-west-1.amazonaws.com:9092"],api_version = (0,10,1))
df = spark.read.csv("s3://fumbabucket/Clean_Amharic.csv")
print(df.head())
for row in df.head(5):
#print(json.dumps(row).encode('utf-8'))
future = producer.send('group6_test',json.dumps(row).encode('utf-8'))
try:
record_metadata = future.get(timeout=10)
except KafkaError:
log.exception()
pass
print (record_metadata.topic)
| UTF-8 | Python | false | false | 911 | py | 20 | sendtext.py | 9 | 0.720088 | 0.673985 | 0 | 24 | 36.916667 | 138 |
aethylx/intern-proj-2017 | 3,015,067,086,971 | 8c6f03d66ae18ab8ba48fc2fe01c289dd8b5f76f | be21b39697605fbf07c2ae66b9e33c56c142e3b9 | /project/urls.py | ba01e8a5768b65c299b1824219b3ca43e49ac974 | []
| no_license | https://github.com/aethylx/intern-proj-2017 | c9ad51d774267fec94b5cc4f94a0499ba4360d38 | ae3831e59deca6bd62a277a7c926c173f523c476 | refs/heads/master | 2018-02-08T23:28:48.508245 | 2016-11-14T21:53:20 | 2016-11-14T21:53:20 | 96,582,845 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url, include
from rest_framework import routers
from django.contrib import admin
from django.contrib.auth import views as auth_views, forms as auth_forms
from django.views.generic.edit import CreateView
from project import views
urlpatterns = [
# router.urls,
url(r'^$', auth_views.login, name='login_page'),
url(r'^projects/activities/(?P<pk>\d+)/$', views.activity_edit, name='activity_edit'),
url(r'^projects/activities/assign/(?P<pk>\d+)/$', views.activity_assignment, name='activity_assignment'),
url(r'^projects/resources/(?P<pk>\d+)/$', views.resource_edit, name='resource_edit'),
url(r'^projects/(?P<pk>\d+)/$', views.project_detail, name='project_detail'),
#url(r'^projects/(?P<slug>[^/]+)/$', views.project_detail , name='project_detail'),
url(r'^projects/new/$', views.project_new, name='project_new'),
url(r'^projects', views.project_list, name='project_list'),
url(r'^clients/(?P<pk>\d+)/$', views.client_detail, name='client_detail'),
url(r'^clients/new/$', views.client_new, name='client_new'),
url(r'^clients', views.client_list, name='client_list'),
url(r'^home', views.index_page, name='index_page'),
url(r'^logout', views.logout_page, name='logout_page'),
url(r'^register/$', views.register, name='register'),
# url(r'^register/$', CreateView.as_view(template_name='registration/register.html', form_class=auth_forms.UserCreationForm, success_url='/'), name='register'),
# url('^accounts/', include('django.contrib.auth.urls')),
url(r'^register/success/$', views.register_success, name='register_success'),
url(r'^create_activity/$', views.create_activity, name='create_activity'),
url(r'^update_activity/$', views.update_activity, name='update_activity'),
url(r'^update_activity_dates/$', views.update_activity_dates, name='update_activity_dates'),
url(r'^delete_activity/$', views.delete_activity, name='delete_activity'),
url(r'^create_link/$', views.create_link, name='create_link'),
url(r'^update_link/$', views.update_link, name='update_link'),
url(r'^delete_link/$', views.delete_link, name='delete_link'),
url(r'^update_lmodel/$', views.update_lmodel, name='update_lmodel'),
url(r'^lock_object/$', views.lock_object, name='lock_object'),
url(r'^unlock_object/$', views.unlock_object, name='unlock_object'),
]
| UTF-8 | Python | false | false | 2,379 | py | 34 | urls.py | 13 | 0.678016 | 0.678016 | 0 | 38 | 61.605263 | 164 |
wkeenan/HTTPi | 12,412,455,485,661 | 2452805ba645b19675189e05763a7798f9eb5372 | 80107d5947c802e44f3d93d021881c57e011aeb3 | /shared/lib/ipi/bluetooth.py | 387a477d104d67d60c609c6841344c511f0fdb05 | []
| no_license | https://github.com/wkeenan/HTTPi | e4b1befc6576798adc74eda96571c7be3c302362 | 0302f274ae62cc87d8f764d7b3115715eea0ff0c | refs/heads/master | 2015-08-07T17:09:42.783094 | 2013-04-21T22:10:46 | 2013-04-21T22:10:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!python
# This 'bluetooth' module name clashes with the system package 'bluetoot' so we need to resolve that...
# http://stackoverflow.com/questions/4816595/how-do-i-import-a-module-whose-name-clashes-with-a-module-in-my-package
from __future__ import absolute_import
import os
import sys
import atexit
from ipi.utils import iPiController
current_dir = os.path.dirname(os.path.abspath(__file__))
print __name__ + ".current_dir = " + current_dir
try:
haveBluetooth = False
import bluetooth as bt
haveBluetooth = True
except ImportError:
haveBluetooth = False
#@iPiController
class BluetoothController(iPiController):
def __init__(self):
self.btenabled = haveBluetooth
self.btConnected = False
return
def construct(self):
if (haveBluetooth):
self.btenabled = True
self.btConnected = False
else:
self.btenabled = False
self.btConnected = False
atexit.register(self.btDisconnect)
def btConnect(self, address, port):
print "btConnect address=" + address + ", port=" + str(port)
if (self.btenabled):
try:
self.client_socket = bt.BluetoothSocket(bt.RFCOMM)
self.client_socket.connect((address, port))
self.btConnected = True
self.client_socket.send(chr(0))
except bt.BluetoothError, e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print e
print(exc_type, fname, exc_tb.tb_lineno)
print "Failed to connect to address=" + address + ", port=" + str(port)
self.btConnected = False
def btDisconnect(self):
print __name__ + "Discconect"
self.btSendByte(0)
def btSendByte(self, byteVal):
print "btSendByte = %s" % hex(byteVal)
#if (self.btenabled and not self.btconnected):
# self.btConnect("00:12:05:09:90:65", 1) #TODO: this seems to be asyn and next send errors due to connect not occring
if (self.btConnected):
self.client_socket.send(chr(int(byteVal)))
def listServices(self):
nearby_devices = bt.discover_devices(lookup_names=True)
print "found %d devices" % len(nearby_devices)
for name, addr in nearby_devices:
print " %s - %s" % (addr, name)
return nearby_devices
### cherrypy config (no dependancy)
_cp_config = { "/" :
{
'tools.gzip.on': True
}
}
listServices.exposed = True
#listServices._cp_config = {'request.show_tracebacks': False}
###
| UTF-8 | Python | false | false | 2,779 | py | 20 | bluetooth.py | 17 | 0.587621 | 0.579345 | 0 | 95 | 28.242105 | 137 |
luc3001/bfg9000 | 16,372,415,378,287 | c8acc767216fc1d22f11f191afe3eba52f4935b8 | 0c452a191d35a26499abec71854f8a8cdb1efc68 | /test/unit/__init__.py | 8f448be0d2633a5c28cb29c363d1df7b7f5aeb1b | [
"BSD-3-Clause"
]
| permissive | https://github.com/luc3001/bfg9000 | fe609d64d7b605fef0ffb375873729c8cf0bd641 | 41452e9dd12f1a44bae68d3bf44f362d283e6802 | refs/heads/master | 2020-09-15T17:10:52.434281 | 2019-11-22T04:33:26 | 2019-11-22T04:33:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mock
import ntpath
import os.path
import posixpath
import sys
from six import iteritems
from .. import *
from bfg9000.path import Path
from bfg9000.platforms.posix import PosixPath
from bfg9000.platforms.windows import WindowsPath
if sys.version_info >= (3,):
mock_open_name = 'builtins.open'
else:
mock_open_name = '__builtin__.open'
# Fix the mock package's mock_open function to work with iter(); note: this is
# already fixed in Python 3.7.1's unittest.mock.
def mock_open(*args, **kwargs):
mo = mock.mock_open(*args, **kwargs)
handle = mo.return_value
handle.__iter__.side_effect = lambda: iter(handle.readlines.side_effect())
return mo
def skip_if_platform(platform, hide=False):
return skip_pred(lambda x: x.platform_name == platform,
'not supported for platform "{}"'.format(platform), hide)
def only_if_platform(platform, hide=False):
return skip_pred(lambda x: x.platform_name != platform,
'only supported for platform "{}"'.format(platform), hide)
class AttrDict(object):
def __init__(self, **kwargs):
for k, v in iteritems(kwargs):
setattr(self, k, v)
class CrossPlatformTestCase(TestCase):
_platforms = ['linux', 'winnt', 'macos']
def __init__(self, *args, **kwargs):
clear_variables = kwargs.pop('clear_variables', False)
variables = kwargs.pop('variables', {})
self.platform_name = kwargs.pop('platform_name', None)
TestCase.__init__(self, *args, **kwargs)
if self.platform_name is None:
return
self.env = make_env(platform=self.platform_name,
clear_variables=clear_variables,
variables=variables)
@property
def Path(self):
return self.env.host_platform.Path
def shortDescription(self):
return self.platform_name
def parameterize(self):
return parameterize_tests(self, platform_name=self._platforms)
class PathTestCase(TestCase):
_path_infos = [
(Path, os.path, 'native'),
(PosixPath, posixpath, 'posix'),
(WindowsPath, ntpath, 'windows'),
]
def __init__(self, *args, **kwargs):
info = kwargs.pop('path_info', None)
TestCase.__init__(self, *args, **kwargs)
if info is None:
return
self.Path, self.ospath, self._desc = info
def shortDescription(self):
return self._desc
def parameterize(self):
return parameterize_tests(self, path_info=self._path_infos)
| UTF-8 | Python | false | false | 2,566 | py | 23 | __init__.py | 22 | 0.626656 | 0.620421 | 0 | 91 | 27.197802 | 79 |
cyh24/media-player | 1,314,259,999,789 | 473278eed9b438b90b811789f2179727a748747a | 1fe33ab269c3f68f6dbf7cf820c9fd0dc8d77f10 | /app/dbprocess.py | b58956e6fb447e56ae5f1cf2aa8265aa2ec76abb | []
| no_license | https://github.com/cyh24/media-player | 703b6ef09762e598c1b26bdfc49305415961d1f1 | bb379e62b20fc5236fb30ed663f731e0c9128a01 | refs/heads/master | 2016-08-02T22:12:07.375269 | 2015-07-12T15:44:04 | 2015-07-12T15:44:04 | 38,149,730 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
#-*- coding:utf-8 -*-
from django.db import models
from models import Teacher, Comments, Video, Serial
import time
def curTime():
return time.strftime('%Y.%m.%d-%H:%M:%S')
def add_teacher(name, info=""):
t = Teacher(name=name, info=info)
t.save()
def add_serial(title, teacher_id, video_num, videos, money, logo_img):
serial = Serial(title=title, teacher=Teacher.objects.filter(id=teacher_id)[0],
video_num=video_num, money=money, logo_img=logo_img)
for video in videos:
video.save()
serial.save()
serial.videos = videos
serial.save()
if __name__ == "__main__":
t_name = "cyh"
t_info = "Handsome, cool, humor."
add_teacher(t_name, t_info)
| UTF-8 | Python | false | false | 734 | py | 12 | dbprocess.py | 4 | 0.623978 | 0.621253 | 0 | 30 | 23.433333 | 82 |
CrisBMoya/IEEE_Fraud_Detection | 2,370,821,947,644 | ee2b1e6b01bce3f0ddbf642d0f43aa43ed0862b3 | fbff2c7ef32a140f5780be7133f56322f1aef37b | /IEEE_Fraud_Detection_v6.py | 6b8a3836c3135e296319b62c4f1fbee2144922c0 | []
| no_license | https://github.com/CrisBMoya/IEEE_Fraud_Detection | 3f064f50bf70dba4d92260f9e29ff820e9b58914 | 7d742781993896b18ae3b6a7b61f64d9f066f8ca | refs/heads/master | 2020-07-23T03:56:27.809266 | 2019-09-17T14:37:37 | 2019-09-17T14:37:37 | 207,438,697 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | %config Completer.use_jedi = False
#Import Modules
import pandas as pd
import zipfile as zip
import plotly as plt
import numpy as np
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
import pandas_profiling
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from lightgbm import LGBMClassifier
from imblearn.under_sampling import EditedNearestNeighbours
#Load Training data
TrainTransaction=pd.read_csv(zip.ZipFile('Data/train_transaction.csv.zip').open("train_transaction.csv"))
TestSet_dev=pd.read_csv(zip.ZipFile('Data/test_transaction.csv.zip').open("test_transaction.csv"))
#Generate quantiles
BinNum=20
TrainTransaction['QuantileAmt']=pd.qcut(x=TrainTransaction['TransactionAmt'], q=BinNum, labels=['Q'+str(X) for X in range(1,(BinNum+1))])
TestSet_dev['QuantileAmt']=pd.qcut(x=TestSet_dev['TransactionAmt'], q=BinNum, labels=['Q'+str(X) for X in range(1,(BinNum+1))])
############################################################
############################################################
############################################################
############################################################
ColumnSelect=np.asarray(["C"+str(X) for X in range(1,15)])
TempTrain=TrainTransaction[ColumnSelect]
TempTrain=TempTrain.join([pd.get_dummies(data=TrainTransaction["ProductCD"]), pd.get_dummies(data=TrainTransaction["P_emaildomain"]), pd.get_dummies(data=TrainTransaction["QuantileAmt"])])
pd.get_dummies(data=TrainTransaction["P_emaildomain"]).shape
pd.get_dummies(data=TrainTransaction["ProductCD"]).shape
TempTrain.shape
#Undersample
enn = EditedNearestNeighbours()
X_resampled, y_resampled = enn.fit_resample(TempTrain.iloc[1:1000:], TrainTransaction['isFraud'].iloc[1:1000:])
X_resampled
#Train and test sets
X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, test_size=0.1, random_state=42)
#Set up SDG Model with Grid Search
LGBMModel=LGBMClassifier()
LGBMModel.fit(X_train, y_train)
#Predict
Predictions=LGBMModel.predict(TempTrain)
#Metrics
print(confusion_matrix(y_test, Predictions))
print(classification_report(y_test, Predictions))
#Save Parameters
text_file = open("Params_V6.txt", "w")
text_file.write("%s\n" % confusion_matrix(y_test, Predictions))
text_file.write("%s\n" % classification_report(y_test, Predictions))
text_file.close()
#Try with test
TestSet_dev_Temp=TestSet_dev[ColumnSelect]
TestSet_dev_Temp=TestSet_dev_Temp.join([pd.get_dummies(data=TestSet_dev["ProductCD"]), pd.get_dummies(data=TestSet_dev["P_emaildomain"]), pd.get_dummies(data=TestSet_dev["QuantileAmt"])])
TestSet_dev_Temp.columns.values
TestSet_dev_Temp.drop(columns='scranton.edu', inplace=True)
TempTrain.columns.values
##################
#Submit predictions
PredictedValues_Dev=LGBMModel.predict(TestSet_dev_Temp)
#Generate file
SubmitResults=pd.DataFrame(data={'TransactionID':TestSet_dev['TransactionID'], 'isFraud':PredictedValues_Dev})
SubmitResults.head()
SubmitResults.to_csv(path_or_buf='SubmitResults_V6.csv',index=False)
#Submit through API
import os
RE=True
if RE==True:
os.system('kaggle competitions submit -c ieee-fraud-detection -f SubmitResults_V6.csv -m "V6 Submission from API with EDA"')
pass | UTF-8 | Python | false | false | 3,318 | py | 12 | IEEE_Fraud_Detection_v6.py | 5 | 0.720313 | 0.712176 | 0 | 83 | 38.987952 | 188 |
tlnguyen2018/holbertonschool-higher_level_programming | 16,114,717,305,278 | d56998f13dbd3431f9476901875526ac01aaab94 | dc0a1a670dcbf862d45f8957419b04beffc9462d | /0x0B-python-input_output/12-student.py | d2de43079656aacd0061900bdf97f32af50aaa60 | []
| no_license | https://github.com/tlnguyen2018/holbertonschool-higher_level_programming | 3b4e67895e1fe70b9a727893edcfafe86d159d19 | fd307b134fb298e1b9e0ef19c353f0b40ae5998c | refs/heads/master | 2020-04-09T09:58:55.209093 | 2019-01-26T01:47:36 | 2019-01-26T01:47:36 | 160,253,304 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
"""
class student
like task 11, adding attribute presentation condition
"""
class Student:
def __init__(self, first_name, last_name, age):
self.first_name = first_name
self.last_name = last_name
self.age = age
def to_json(self, attrs=None):
"""
Check if attr is list, not return everything
"""
if isinstance(attrs, list):
for items in attrs:
if not isinstance(items, str):
return self.__dict__
return {key: self.__dict__[key] for key in attrs
if key in self.__dict__}
return self.__dict__
| UTF-8 | Python | false | false | 663 | py | 59 | 12-student.py | 43 | 0.541478 | 0.536953 | 0 | 24 | 26.625 | 60 |
tomstorey/Z80_clock | 11,089,605,584,234 | 954cc7f5d457650b01bf06f7517cb78c6f47a89e | 600b7b596918aa8b96b8b2db4aa33e7b3413fc08 | /source/loader.py | cf2efc71c90b15f53c9a8a4101e8fac661581e4a | []
| no_license | https://github.com/tomstorey/Z80_clock | c2f1cb5215873b3f4094b48b041bca18a3ae9800 | f041c63a229f3253705ac12fa05ea7d73a1caf6b | refs/heads/master | 2022-06-20T03:21:54.396241 | 2022-06-06T16:09:46 | 2022-06-06T16:09:46 | 252,347,460 | 5 | 1 | null | false | 2022-06-06T16:09:47 | 2020-04-02T03:37:08 | 2022-05-30T21:22:10 | 2022-06-06T16:09:46 | 6,185 | 4 | 1 | 0 | Assembly | false | false | import argparse
import os
import time
from serial import Serial
from array import array
def main():
parser = argparse.ArgumentParser(
description='Load application software via UART'
)
parser.add_argument(
'-b', '--base',
dest='base', type=str, required=True,
help='Base address where the software will be loaded, usually in RAM. '
'Supply in hexadecimal format. Minimum 0x0000, maximum 0xFFFF.'
)
parser.add_argument(
'-e', '--exec',
dest='exec', action='store_true', default=False,
help='After loading, execute from base address'
)
parser.add_argument(
'filename',
type=str,
help='Filename that should be loaded. Must be a binary file, not hex.'
)
args = parser.parse_args()
base = int(args.base, 16)
exec = args.exec
filename = args.filename
if not (0 <= base <= 0xFFFF):
raise ValueError(
'Base has invalid value. Minimum 0x0000, maximum 0xFFFF.'
)
filesize = os.stat(filename).st_size
if not (1 <= filesize <= 0xFFFF):
raise ValueError(
'Size of file is invalid. Minimum 1 byte, maximum 65536.'
)
if base + filesize > 0xFFFF:
raise ValueError(
'Base address plus file size would cause an address overflow.'
)
base_le = (base & 0xFF) << 8 | (base & 0xFF00) >> 8
filesize_le = (filesize & 0xFF) << 8 | (filesize & 0xFF00) >> 8
rom = array('B')
with open(filename, 'rb') as f:
rom.fromfile(f, filesize)
data = [
3,
(filesize & 0xFF), (filesize & 0xFF00) >> 8,
(base & 0xFF), (base & 0xFF00) >> 8
] + list(rom)
if exec is True:
data += [
4,
(base & 0xFF), (base & 0xFF00) >> 8
]
data = bytes(data)
ser = Serial(
'/dev/cu.usbserial-FT94JP1H',
baudrate=57600,
timeout=1
)
if ser.in_waiting > 0:
ser.read(size=ser.in_waiting)
# Wait for serial loader to be available
print('Waiting for serial loader availability: ', end='', flush=True)
failed = 0
while True:
ser.write(bytes([0x01]))
ser.flush()
try:
if ord(ser.read(size=1)) == 64:
print(' OK')
break
except TypeError:
failed += 1
print('.', end='', flush=True)
if failed == 10:
print(' Failed after too many attempts')
return
start = time.time()
print('Transferring %d bytes to 0x%4.4X: ' % (filesize, base), end='', flush=True)
ser.write(data)
ser.flush()
failed = 0
while True:
try:
if ord(ser.read(size=1)) == 33:
break
except TypeError:
failed += 1
print('.', end='', flush=True)
if failed == 5:
print(' Failed: transfer not acknowledged')
return
duration = time.time() - start
print(' Done in %.3fs' % duration)
failed = 0
if exec is True:
print('Executing from 0x%4.4X: ' % base, end='', flush=True)
while True:
try:
if ord(ser.read(size=1)) == 43:
break
except TypeError:
failed += 1
print('.', end='', flush=True)
if failed == 2:
print(' Failed: execution not acknowledged')
return
print(' OK')
ser.close()
return
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,637 | py | 24 | loader.py | 7 | 0.509761 | 0.48419 | 0 | 160 | 21.73125 | 86 |
shivamShankhdhar/ChatPlayGround | 8,134,668,086,622 | 809d7e35b3695648c33ee91a64052703eeac3c27 | edfaac4e34bd48e3fda1d726f839933ba8ae6ccb | /accounts/views.py | 2e212dbbc45e9e9d9622295ba87e32a98c9d3b75 | []
| no_license | https://github.com/shivamShankhdhar/ChatPlayGround | 9733a88972cb0129ef8294b6b3bcf71ed42e116a | 9076bc98b4e83c4123468aabb727b9bdd759aac4 | refs/heads/master | 2023-02-15T21:02:22.432301 | 2021-01-12T10:46:12 | 2021-01-12T10:46:12 | 328,175,237 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render, redirect
from django.http import HttpResponse, request
# FORMS
from accounts.forms import AccountAuthenticationForm, RegistrationForm, AccountUpdateForm
# IMPORTED FOR CREATING LOGIN, LOGOUT AND AUTHENTICATING TO THE USER
from django.contrib.auth import login, authenticate, logout, views
# imported for BASE_URL
from django.conf import settings
# ACCOUNTS MODELS
from accounts.models import Account
# required for image cropping and saving
from django.core.files.storage import default_storage
from django.core.files.storage import FileSystemStorage
import os
import cv2
import json
import base64
# import requests
from django.core import files
TEMP_PROFILE_IMAGE_NAME = "temp_profile_image.png"
# import utils
from friend.utils import get_friend_request_or_false
from friend.friend_request_status import FriendRequestStatus
from friend.models import FriendList, FriendRequest
# views
def register_view(request, *args, **kwargs):
template_name = 'accounts/register.html'
user = request.user
if user.is_authenticated:
return HttpResponse(f"You are already authenticated as {user.email}.")
context = {}
if request.POST:
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
# after registerration user will automatic login
email = form.cleaned_data.get('email').lower()
raw_password = form.cleaned_data.get('password1')
account = authenticate(email=email, password=raw_password)
login(request, account)
destination = get_redirect_if_exists(request)
if destination:
return redirect(destination)
return redirect("home")
else:
context['registration_form'] = form
return render(request, template_name, context)
def logout_view(request):
logout(request)
return redirect('home')
def login_view(request, *args, **kwargs):
template_name = 'accounts/login.html'
context = {}
if request.user.is_authenticated:
return redirect('home')
destination = get_redirect_if_exists(request)
if request.POST:
form = AccountAuthenticationForm(request.POST)
if form.is_valid():
email = request.POST['email']
password = request.POST['password']
user = authenticate(email=email, password=password)
if user:
login(request, user)
destination = get_redirect_if_exists(request)
if destination:
return redirect(destination)
return redirect('home')
else:
context['login_form'] = form
return render(request, template_name, context)
def get_redirect_if_exists(request):
redirect = None
if request.GET:
if request.Get.get("next"):
redirect = str(request.GET.get('next'))
def account_view(request, *args, **kwargs):
"""
-Logic here is kidn of tricky
is_self (boolean)
-1: NO_REQUEST_SENT
0:THEM_SENT_TO_YOU
1:YOU_SENT_TO_THEM
"""
context = {}
template_name = 'accounts/account.html'
user_id = kwargs.get("user_id")
try:
account = Account.objects.get(pk=user_id)
except Account.DoesNotExist:
return HttpResponse("that user doesn't exist.")
# if account exists
if account:
context['id'] = account.id
context['username'] = account.username
context['email'] = account.email
context['profile_image'] = account.profile_image.url
context['hide_email'] = account.hide_email
try:
friend_list = FriendList.objects.get(user = account)
except FriendList.DoesNotExist:
friend_list = FriendList(user = account)
friend_list.save()
friends = friend_list.friends.all()
context['friends'] = friends
# state tamplate variables
is_self = True
is_friend = False
request_sent = FriendRequestStatus.NO_REQUEST_SENT.value
friend_requests = None
user = request.user
if user.is_authenticated and user != account:
is_self = False
if friends.filter(id = user.id):
is_friend = True
else:
is_friend = False
# CASE1: Request has been sent from them to you
if get_friend_request_or_false(sender = account, receiver = user) != False:
request_sent = FriendRequestStatus.THEM_SENT_TO_YOU.value
context["pending_friend_request_id"] = get_friend_request_or_false(sender = account, receiver = user).id
#CASE 2:Request has been sent to them from you:
# FriendRequestStatus.YOU_SENT_TO_THEM
elif get_friend_request_or_false(sender = account, receiver = user) != False:
request_sent = FriendRequestStatus.YOU_SENT_TO_THEM.value
else:
request_sent = FriendRequestStatus.NO_REQUEST_SENT.value
elif not user.is_authenticated:
is_self = False
# user looking to the self profile
else:
try:
friend_requests = FriendRequest.objects.filter(receiver = user, is_active = True)
except:
pass
context["is_self"] = is_self
context['is_friend'] = is_friend
context['BASE_URL'] = settings.BASE_URL
context["request_sent"] = request_sent
context["friend_requests"] = friend_requests
return render(request, template_name, context)
# search functionality
def account_search_view(request, *args, **kwargs):
context = {}
if request.method == "GET":
search_query = request.GET.get("q")
if len(search_query) > 0:
search_results = Account.objects.filter(email__icontains=search_query).filter(
username__icontains=search_query).distinct()
user = request.user
accounts = [] # [(account1, True), (account2, False), ...]
for account in search_results:
accounts.append((account, False)) # you have no friends yet
context['accounts'] = accounts
return render(request, "accounts/search_result.html", context)
# views commentd
"""
context = {}
if request.method == "GET":
search_query = request.GET.get("q")
if len(search_query) > 0:
search_results = Account.objects.filter(email__icontains=search_query).filter(
username__icontains=search_query).distinct()
user = request.user
accounts = [] # [(account1, True), (account2, False), ...]
for account in search_results:
accounts.append((account, False)) # you have no friends yet
context['accounts'] = accounts
return render(request, "accounts/search_results.html", context)
"""
# end views
def edit_account_view(request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect("login")
user_id = kwargs.get("user_id")
account = Account.objects.get(pk=user_id)
if account.pk != request.user.pk:
return HttpResponse("You cannot edit someone elses profile.")
context = {}
if request.POST:
form = AccountUpdateForm(
request.POST, request.FILES, instance=request.user)
if form.is_valid():
# delete old profile image so the name preserved
# account.profile_image.delete()
form.save()
# new_username = form.cleaned_data['username']
return redirect("accounts:view", user_id=account.pk)
else:
form = AccountUpdateForm(request.POST, instance=request.user,
initial={
"id": account.pk,
"email": account.email,
"username": account.username,
"profile_image": account.profile_image,
"hide_email": account.hide_email,
}
)
context['form'] = form
else:
form = AccountUpdateForm(
initial={
"id": account.pk,
"email": account.email,
"username": account.username,
"profile_image": account.profile_image,
"hide_email": account.hide_email,
}
)
context['form'] = form
context['DATA_UPLOAD_MAX_MEMORY_SIZE'] = settings.DATA_UPLOAD_MAX_MEMORY_SIZE
return render(request, "accounts/edit_account.html", context)
def save_temp_profile_image_from_base64String(imageString, user):
INCORRECT_PADDING_EXCEPTION = "Incorrect padding"
try:
if not os.path.exists(settings.TEMP):
os.mkdir(settings.TEMP)
if not os.path.exists(settings.TEMP + "/" + str(user.pk)):
os.mkdir(settings.TEMP + "/" + str(user.pk))
url = os.path.join(settings.TEMP + "/" + str(user.pk),
TEMP_PROFILE_IMAGE_NAME)
storage = FileSystemStorage(location=url)
image = base64.b64decode(imageString)
with storage.open('', 'wb+') as destination:
destination.write(image)
destination.close()
return url
except Exception as e:
# print("exception: " + str(e))
# workaround for an issue I found
if str(e) == INCORRECT_PADDING_EXCEPTION:
imageString += "=" * ((4 - len(imageString) % 4) % 4)
return save_temp_profile_image_from_base64String(imageString, user)
return None
def crop_image(request, *args, **kwargs):
payload = {}
user = request.user
if request.POST and user.is_authenticated:
try:
imageString = request.POST.get("image")
url = save_temp_profile_image_from_base64String(imageString, user)
img = cv2.imread(url)
cropX = int(float(str(request.POST.get("cropX"))))
cropY = int(float(str(request.POST.get("cropY"))))
cropWidth = int(float(str(request.POST.get("cropWidth"))))
cropHeight = int(float(str(request.POST.get("cropHeight"))))
if cropX < 0:
cropX = 0
if cropY < 0: # There is a bug with cropperjs. y can be negative.
cropY = 0
crop_img = img[cropY:cropY+cropHeight, cropX:cropX+cropWidth]
cv2.imwrite(url, crop_img)
# delete the old image
user.profile_image.delete()
# Save the cropped image to user model
user.profile_image.save(
"profile_image.png", files.File(open(url, 'rb')))
user.save()
payload['result'] = "success"
payload['cropped_profile_image'] = user.profile_image.url
# delete temp file
os.remove(url)
except Exception as e:
print("exception: " + str(e))
payload['result'] = "error"
payload['exception'] = str(e)
return HttpResponse(json.dumps(payload), content_type="application/json")
| UTF-8 | Python | false | false | 11,321 | py | 14 | views.py | 8 | 0.590319 | 0.587316 | 0 | 314 | 35.050955 | 124 |
starsfamily/PyStudio | 14,671,608,306,808 | 85d287ce95be19e451e6fe5b7349aa540f8c4898 | e6722fb2cdbb58d76d05272c8a3a7b39a1eb711d | /分行显示星期(list操作).py | 4a99b6b43efca263286105222d25cd494b5b264e | []
| no_license | https://github.com/starsfamily/PyStudio | cd14a3c8e064fe1e1cd2a7736f01e022d4c9b852 | 3665cdb9bed57b4d47d69273aeadca68a222cbb4 | refs/heads/master | 2021-06-19T19:36:08.232408 | 2021-02-14T09:09:03 | 2021-02-14T09:09:03 | 173,943,021 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | week=['Mon.', 'Tues.', 'Wedn.', 'Thur.', 'Fri.']
weekend=['Sat.', 'Sun.']
#week.append(weekend)
week.extend(weekend)
for i, j in enumerate(week, 1):
print(i, j)
for element in week:
print(element)
| UTF-8 | Python | false | false | 207 | py | 40 | 分行显示星期(list操作).py | 36 | 0.603865 | 0.599034 | 0 | 9 | 21.888889 | 48 |
chbicibi/vae_mnist | 13,709,535,649,708 | 2f0fb7f532f45290d8aac61e201db071111bf6a6 | 8a9e3584a4236c197f83fa92fafb48628ee9b4d0 | /dataset.py | e5027f496a18bb4a33f3290dc2840bda8fd2efbc | []
| no_license | https://github.com/chbicibi/vae_mnist | fc86e7072fe75ad8f5d33773c04d74c4ce8225cf | 41d9e199c1670896509208b7b5f0def90d7359a6 | refs/heads/master | 2020-04-21T06:34:21.761502 | 2019-02-15T10:01:37 | 2019-02-15T10:01:37 | 169,368,961 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
import configparser
import math
import os
import shutil
import numpy as np
import matplotlib.pyplot as plt
import myutils as ut
import common as C_
DEBUG0 = False
################################################################################
# 学習イテレータ
################################################################################
class ContainerBase(object):
def __init__(self, it):
self.it = it
self.data = None
self.len = len(it)
def __len__(self):
return self.len
def __getitem__(self, key):
if type(key) is tuple:
head, *tail = key
if type(head) is slice:
tail = (slice(None), *tail)
return np.array(self[head])[tail]
elif type(key) is slice:
# return [self[i] for i in range(*key.indices(self.len))]
return SlicedContainer(self, *key.indices(self.len))
else:
if key >= self.len:
raise IndexError
if not self.data:
return self.get_data(key)
if self.data[key] is None:
self.data[key] = self.get_data(key)
return self.data[key]
# else:
# raise TypeError
def get_data(self, key):
return self.it[key]
class SlicedContainer(ContainerBase):
def __init__(self, it, start=None, stop=None, step=None):
super().__init__(it)
self.start = start or 0
self.stop = stop or self.len
self.step = step or 1
self.last = self.stop - (self.stop - self.start) % self.step
self.len = math.ceil((self.stop - self.start) / self.step)
def get_data(self, key):
if 0 <= key < self.len:
k = self.start + key * self.step
elif -self.len <= key < 0:
k = self.last + (1 + key) * self.step
else:
raise IndexError
return self.it[k]
class MemoizeMapList(ContainerBase):
''' 入力イテラブルを加工するイテラブルオブジェクト '''
def __init__(self, fn, it, name='', cache=False, cache_path=None):
self.name = name
self.fn = fn
self.it = it
self.len = len(it)
if cache:
self.data = [None] * self.len
else:
self.data = None
if cache_path:
abspath = os.path.abspath(cache_path)
os.makedirs(abspath, exist_ok=True)
self.cache_path = abspath
else:
self.cache_path = None
def get_data(self, key):
if self.cache_path:
if self.name:
file = f'cache_{self.name}_{key}.npy'
else:
file = f'cache_{key}.npy'
path = os.path.join(self.cache_path, file)
if os.path.isfile(path):
if DEBUG0:
print(f'load(cache) {key}/{self.len}', ' '*20, end='\r')
return np.load(path)
else:
data = self.load_data(key)
np.save(path, data)
return data
else:
return self.load_data(key)
def load_data(self, key):
if self.fn:
return self.fn(self.it[key])
else:
return self.it[key]
class MapChain(ContainerBase):
''' 入力イテラブルを加工するイテラブルオブジェクト
複数のイテラブルを連結
'''
def __init__(self, fn, *its, name=''):
self.name = name
self.fn = fn
self.its = its
self.lens = list(map(len, its))
self.len = sum(self.lens)
self.data = None
def get_data(self, key):
if self.fn:
return self.fn(self.point(key))
else:
return self.point(key)
def point(self, key):
if key < 0:
key += self.len
for i, n in enumerate(self.lens):
if key < n:
return self.its[i][key]
key -= n
print(key, self.lens)
raise IndexError
################################################################################
# データを加工(オリジナル→) # frame => (H, W, C=[u, v, p, f, w])
################################################################################
class Formatter(object):
def __init__(self, vmin, vmax):
self.vmin = vmin
self.vmax = vmax
def __call__(self, frame):
a = frame[:, :, :2]
a = (a - self.vmin) / (self.vmax - self.vmin)
return a.transpose(2, 0, 1) # => (H, W, C) -> (C, H, W)
################################################################################
def __test__():
pass
def get_args():
'''
docstring for get_args.
'''
parser = argparse.ArgumentParser()
parser.add_argument('out', nargs='?', default='new_script',
help='Filename of the new script')
parser.add_argument('--force', '-f', action='store_true',
help='Force')
parser.add_argument('--test', '-t', action='store_true',
help='Run as test mode')
args = parser.parse_args()
return args
def main():
'''
docstring for main.
'''
args = get_args()
if args.test:
__test__()
return
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 5,570 | py | 5 | dataset.py | 4 | 0.445367 | 0.442783 | 0 | 207 | 24.173913 | 80 |
acontenti/japanize | 15,745,350,129,036 | b33faae75962238f39c3e63b5855ca2b7bca0ccb | 5d2ea039c1b3c4bb0d62cc2c78c63799ec2685d6 | /japanize_server.py | 2ede5d914faa2438896cc6fe541283c70077c17b | [
"BSD-3-Clause"
]
| permissive | https://github.com/acontenti/japanize | 2f22bac8446bd78e7fd5ced3942c74ac044416f7 | fb9a3ee5b2393bf6466e57cb110b886563489621 | refs/heads/master | 2023-01-24T07:22:13.113227 | 2020-12-01T00:23:33 | 2020-12-01T00:23:33 | 312,388,156 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import jsonify, Flask
import japanize
app = Flask(__name__, static_folder="static")
@app.route("/", defaults={"names": None})
@app.route("/<names>")
def root(names):
if names:
return jsonify(list(map(japanize.transform, names.split(" "))))
else:
return app.send_static_file("index.html")
| UTF-8 | Python | false | false | 327 | py | 10 | japanize_server.py | 8 | 0.64526 | 0.64526 | 0 | 14 | 22.357143 | 71 |
nb-spaceship/ontology-test | 1,967,095,071,765 | d6ce34fbda7453003bf3f9251b81891ddac58c0e | c87cae10a1f821f141fe487c04f8b7ac4f4be9d1 | /test_tool/test/test_neo_param/test_main.py | a6b30e1474e162adeec994da86b955612be187f7 | []
| no_license | https://github.com/nb-spaceship/ontology-test | 27e0669246b03c39591be474c2044227ad0c9553 | 30f240064b8883f9ff978543ef63e30f1af098e6 | refs/heads/master | 2020-03-17T09:24:13.272297 | 2018-08-31T02:30:24 | 2018-08-31T02:30:24 | 133,473,590 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
import re
import ddt
import unittest
import urllib
import urllib.request
import json
import os
import sys
import getopt
import time
import requests
import subprocess
import traceback
sys.path.append('..')
sys.path.append('../..')
import utils.connect
from utils.config import Config
from utils.taskdata import TaskData, Task
from utils.logger import LoggerInstance as logger
from utils.hexstring import *
from utils.error import Error
from utils.parametrizedtestcase import ParametrizedTestCase
from api.apimanager import API
# from utils.api.commonapi import *
# from utils.api.rpcapi import RPCApi
# from utils.api.init_ong_ont import *
# from utils.api.contractapi import *
# from test_governance_api.test_api import nodeCountCheck
from test_neo_param.test_config import test_config
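
# Every test method below repeats the same invoke-and-check pattern. The
# helper below is an illustrative sketch of that pattern only -- it is not
# called by the test methods, which keep their explicit bodies so each case
# logs independently. It assumes ParametrizedTestCase exposes ASSERT, as
# used throughout this file.
def _invoke_and_check(case, func_name, argvs, expected_result):
    # invoke the deployed contract and compare the returned Result hex string
    (process, response) = API.contract().invoke_function(
        test_config.m_contract_address, func_name, "",
        argvs=argvs, node_index=0, sleep=1)
    rs = response["result"]["Result"] == expected_result
    case.ASSERT(process and rs, "")
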
class test_neo_param_1(ParametrizedTestCase):
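	"""NeoVM parameter-type tests: test_init deploys the contract, then each
	case calls one contract method with a single typed argument and checks
	the hex string in response["result"]["Result"]."""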
def test_init(self):
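		# deploy the contract under test once and cache its address for all cases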
		test_config.m_contract_address = API.contract().deploy_contract_full(test_config.deploy_neo, test_config.name, test_config.desc)[0]
API.node().wait_gen_block()
time.sleep(5)
def setUp(self):
		logger.open("test_neo_param/" + self._testMethodName + ".log", self._testMethodName)
def tearDown(self):
logger.close(self.result())
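
# int to int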
def test_base_001_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "int","value": "0"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
traceback.print_exc()
logger.print(e.args[0])
def test_abnormal_002_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "int","value": "-1"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_003_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "int","value": "65535"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_004_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "int","value": "65536"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_005_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "string","value": "abc"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_006_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_007_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "bytearray","value": "111122223333"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_008_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "bool","value": "true"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_009_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "int_to_int", "", argvs = [{"type": "bool","value": "false"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
# string to int
def test_normal_010_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "string_to_int", "", argvs = [{"type": "string","value": "qwertyuiop"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_011_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "string_to_int", "", argvs = [{"type": "string","value": "~!@@#$$%^&*()_+-={}|:\"<>?;'[] \\,./"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_012_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "string_to_int", "", argvs = [{"type": "string","value": "abcd1234_))()*(&^$^%#"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_013_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "string_to_int", "", argvs = [{"type": "int","value": "123"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_014_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "string_to_int", "", argvs = [{"type": "bytearray","value": "111122223333"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_015_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "string_to_int", "", argvs = [{"type": "bool","value": "true"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_016_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "string_to_int", "", argvs = [{"type": "bool","value": "false"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
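
# bool to int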
def test_normal_024_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "bool_to_int", "", argvs = [{"type": "bool","value": "true"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_025_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "bool_to_int", "", argvs = [{"type": "int","value": "0"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_026_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "bool_to_int", "", argvs = [{"type": "bool","value": "false"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_027_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "bool_to_int", "", argvs = [{"type": "int","value": "1"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_028_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "bool_to_int", "", argvs = [{"type": "string","value": "abc"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_029_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "bool_to_int", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_030_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "bool_to_int", "", argvs = [{"type": "bytearray","value": "111122223333"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
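
# byte to int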
def test_normal_031_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "byte_to_int", "", argvs = [{"type": "bytearray","value": "1234567890abcdef"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_032_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "byte_to_int", "", argvs = [{"type": "bytearray","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_033_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "byte_to_int", "", argvs = [{"type": "int","value": "123"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_034_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "byte_to_int", "", argvs = [{"type": "string","value": "zxcvbnm!@"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_035_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "byte_to_int", "", argvs = [{"type": "bool","value": "true"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_036_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "byte_to_int", "", argvs = [{"type": "bool","value": "false"}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_037_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_37", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '00'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_abnormal_038_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_38", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == 'ff'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_039_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_39", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == 'ffff00'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_046_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_46", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '71776572747975696f70'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_047_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_47", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '7e214040232424255e262a28295f2b2d3d7b7d7c3a223c3e3f3b275b5d205c2c2e2f'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_048_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_48", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '61626364313233345f292928292a28265e245e2523'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_060_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_60", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '01'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_062_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_62", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '00'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
def test_normal_067_invokeFunction(self):
process = False
try:
(process, response) = API.contract().invoke_function(test_config.m_contract_address, "test_67", "", argvs = [{"type": "string","value": ""}], node_index = 0, sleep=1)
rs = response["result"]["Result"] == '1234567890abcdef'
self.ASSERT(process and rs, "")
except Exception as e:
logger.print(e.args[0])
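# Each case above invokes the deployed contract with a single typed argument
# through the test framework's API wrapper and checks the returned VM result
# bytes; the "abnormal" cases deliberately pass a mismatched argument type.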
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 15,971 | py | 309 | test_main.py | 180 | 0.652871 | 0.621564 | 0 | 412 | 37.76699 | 211 |
bthate/wtfbot | 15,281,493,649,436 | 4657cd72a41a4b52fcf33f617437d18546c6b41e | 426a8b9f358d59aebace72083c6c5bfa2d00ea0f | /wtf/typ.py | 74f36db95007b3dd6270f4d42368b13bc5704f89 | []
| no_license | https://github.com/bthate/wtfbot | 5bc3790b67ef202ab037c3f62924328f533fde56 | 38406a1f515fefa1bfdcae7ef6bac318dcafb364 | refs/heads/master | 2020-11-27T06:12:14.069206 | 2019-12-22T00:41:46 | 2019-12-22T00:41:46 | 229,334,522 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import importlib
import types
import wtf
import wtf.utl
def get_cls(name):
try:
modname, clsname = name.rsplit(".", 1)
except ValueError:
modname = "wtf"
clsname = name
mod = importlib.import_module(modname)
return getattr(mod, clsname)
def get_name(o):
t = type(o)
if t == types.ModuleType:
return o.__name__
try:
n = "%s.%s" % (o.__self__.__class__.__name__, o.__name__)
except AttributeError:
try:
n = "%s.%s" % (o.__class__.__name__, o.__name__)
except AttributeError:
try:
n = o.__class__.__name__
except AttributeError:
n = o.__name__
return n
def get_type(o):
t = type(o)
if t == type:
return get_vartype(o)
return str(type(o)).split()[-1][1:-2]
def get_clstype(o):
try:
return "%s.%s" % (o.__class__.__module__, o.__class__.__name__)
except AttributeError:
pass
def get_objtype(o):
try:
return "%s.%s" % (o.__self__.__module__, o.__self__.__name__)
except AttributeError:
pass
def get_vartype(o):
try:
return "%s.%s" % (o.__module__, o.__name__)
except AttributeError:
pass
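# Usage sketch (illustrative only, not part of the original module):
#     get_type({})                  # -> "dict"
#     get_cls("wtf.typ.get_name")   # resolves a dotted path back to get_name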
| UTF-8 | Python | false | false | 1,241 | py | 34 | typ.py | 32 | 0.501209 | 0.497985 | 0 | 53 | 22.396226 | 71 |
JoaoFaria025/udemyDjango | 12,034,498,378,480 | b7ac39537916956acdeb510cbdbd707f8dbb0217 | 7784538b4c6fd2f7cc1a228ce99177f9f7e2af5c | /base/views.py | e7cdba6b7c16a76f4c7710cc975833f3dc5bf4eb | []
| no_license | https://github.com/JoaoFaria025/udemyDjango | 0b8e3c2921bb98462540849503a2e81662d4b9ea | 7cfdaee1f5a994b7a23ba334e4da73fa58075484 | refs/heads/main | 2023-07-29T05:01:37.126645 | 2021-08-27T01:45:37 | 2021-08-27T01:45:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.contrib import messages
from .models import Cliente
from .forms import ContatoForm
def index(request):
cliente = Cliente.objects.all()
form = ContatoForm()
    messages.success(request, 'Email enviado com sucesso')  # "Email sent successfully"
context = {
'nome' : 'POO',
'cliente':cliente,
'form': form,
}
return render(request,'index.html',context) | UTF-8 | Python | false | false | 414 | py | 5 | views.py | 4 | 0.676329 | 0.676329 | 0 | 16 | 24.9375 | 57 |
matheusalefe/tdd-python | 7,894,149,927,478 | fa0333a259d9461277244a86a86ad6c23a66d391 | 82478001844769e35e9c940cb30a793a5bb8786a | /framework/posts/forms.py | a4ee593e7dfb175bf520168d60409fb54c4d414e | []
| no_license | https://github.com/matheusalefe/tdd-python | 49399350e2ef3946fd42b021c4a978ccef8e9e16 | 0883fed1631fef103d5aaa906b6a8d961bd6a811 | refs/heads/master | 2020-09-08T09:32:56.171909 | 2019-12-11T06:18:44 | 2019-12-11T06:18:44 | 221,095,218 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from .models import Formulario
class PedidoForm(forms.ModelForm):
class Meta:
model = Formulario
fields = [
'nome',
'email',
'data_nascimento',
'senha',
'confirmacao_senha'
] | UTF-8 | Python | false | false | 289 | py | 32 | forms.py | 29 | 0.519031 | 0.519031 | 0 | 13 | 21.307692 | 34 |
bitcoinsSG/QRL | 10,806,137,763,948 | 992792b32692a3cd1bf9109726a019944c6ece52 | 8518699dfa14a328b1f0c9239a244d6202973034 | /qrl/services/PeerManager.py | d7900a0f120eb1c75eccb3f77efecc90e5d6b6de | [
"MIT"
]
| permissive | https://github.com/bitcoinsSG/QRL | 61d43d5766e4a5d966c53ee81a49fe97a204e5b6 | 88960b1ebbcfaaddc2a2b83ce069d5597ec99654 | refs/heads/master | 2021-01-19T07:34:03.526396 | 2017-11-19T21:27:44 | 2017-11-19T21:27:44 | 100,640,901 | 2 | 0 | null | true | 2017-08-17T20:05:24 | 2017-08-17T20:05:24 | 2017-08-17T19:47:44 | 2017-08-16T09:50:21 | 4,047 | 0 | 0 | 0 | null | null | null | import threading
from time import time, sleep
import grpc
from qrl.core import logger
from qrl.generated import qrl_pb2
class PeerMetadata(object):
DISCOVERY_THRESHOLD_SECS = 20
STABILITY_THRESHOLD_SECS = 5
def __init__(self, conn_addr):
self.connected_since = time()
self.peers_refreshed_at = None
self.conn_addr = conn_addr
self.channel = grpc.insecure_channel(conn_addr)
self.stub = qrl_pb2.P2PNodeStub(self.channel)
self.node_info = None
@property
def peers_needs_refresh(self):
return self.peers_refreshed_at is None or time() - self.peers_refreshed_at > self.DISCOVERY_THRESHOLD_SECS
@property
def is_stable(self):
return (time() - self.connected_since > self.STABILITY_THRESHOLD_SECS) and self.node_info is not None
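# PeerManager runs a daemon thread (_maintain_peers) that polls every known
# peer each cycle: peers whose peer list is stale are asked for known peers,
# the rest for a node-state heartbeat; peers failing either RPC are removed.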
class PeerManager(object):
TIMEOUT_SECS = 2
REFRESH_CYCLE_SECS = 3
def __init__(self, qrlnode):
self._peers = dict()
self.qrlnode = qrlnode
self._lock = threading.Lock()
self.thread = threading.Thread(target=self._maintain_peers)
self.thread.daemon = True
self.thread.start()
# TODO: Create a bloom filter (as a black list) to avoid frequent reconnections
# TODO: Define a banning time to avoid reconnecting to certain ips
@property
def peer_count(self):
with self._lock:
return len(self._peers)
@property
def stable_peer_count(self):
with self._lock:
return sum(1 for v in self._peers.values() if v.is_stable)
def add(self, addr_list):
# FIXME Get new peers every time there is a new connection
with self._lock:
# FIXME: Limit amount of connections
# FIXME: Check banning before adding
for new_peer_ip in addr_list:
if new_peer_ip in ['127.0.0.1', '0', '0.0.0.0']:
continue
new_peer_conn_addr = '{}:9009'.format(new_peer_ip)
if new_peer_conn_addr not in self._peers:
self._peers[new_peer_conn_addr] = PeerMetadata(new_peer_conn_addr)
def remove(self, conn_list):
with self._lock:
for peer_conn in conn_list:
self._peers.pop(peer_conn, None)
def recycle(self):
with self._lock:
# FIXME: Flush old connections to promote change, swap peers, etc. Use hash logic
pass
def stable_peers(self) -> list:
with self._lock:
return [v for v in self._peers.values() if v.is_stable]
def _all_peers(self):
# FIXME: Improve this. Make a temporary copy for now
with self._lock:
tmp = list(self._peers.values())
return iter(tmp)
def _add_peers_callback(self, response_future):
if response_future.code() == grpc.StatusCode.OK:
res = response_future.result()
response_future.pm.node_info = res.node_info
peer_list = (peer.ip for peer in res.known_peers)
self.add(peer_list)
# TODO: check version/network_id or remove node
# res.node_info.version
# res.node_info.network_id
else:
self.remove([response_future.pm.conn_addr])
    def _update_state_callback(self, response_future):
        if response_future.code() == grpc.StatusCode.OK:
            res = response_future.result()
            response_future.pm.node_info = res.node_info
            # TODO: check version/network_id or remove node
            # res.node_info.version
            # res.node_info.network_id
        else:
            self.remove([response_future.pm.conn_addr])
def _maintain_peers(self):
while True:
try:
for peer_metadata in self._all_peers():
if peer_metadata.peers_needs_refresh:
f = peer_metadata.stub.GetKnownPeers.future(qrl_pb2.GetKnownPeersReq(),
timeout=PeerManager.TIMEOUT_SECS)
f.pm = peer_metadata
f.add_done_callback(self._add_peers_callback)
else:
f = peer_metadata.stub.GetNodeState.future(qrl_pb2.GetNodeStateReq(),
timeout=PeerManager.TIMEOUT_SECS)
f.pm = peer_metadata
f.add_done_callback(self._update_state_callback)
# FIXME: QRLNode should probably depend on this
tmp = []
for peer_metadata in self.stable_peers():
addr = peer_metadata.conn_addr.split(':')[0]
tmp.append(addr)
#self.qrlnode.update_peer_addresses(tmp)
sleep(self.REFRESH_CYCLE_SECS)
self.recycle()
except Exception as e:
logger.exception(e) | UTF-8 | Python | false | false | 4,976 | py | 64 | PeerManager.py | 60 | 0.562098 | 0.556672 | 0 | 137 | 35.328467 | 114 |
OSTnm/tiny_mdb | 16,303,695,888,482 | cdd15969877945abe34771e96fb3b76431005ad0 | eb90902a1d6d941019a91b2cf385085869229811 | /thirdparty/dbfread/__init__.py | b6a3f76090536403f53d658c5e299cd61a9ca7b9 | [
"Apache-2.0"
]
| permissive | https://github.com/OSTnm/tiny_mdb | 25d3d7c4e3570d43544049b74bdcbff5ec111823 | 4871f3bd6c3c6c47962d5d9e4c0e0070949ce6ae | refs/heads/master | 2021-01-08T01:27:14.781341 | 2020-02-25T09:51:10 | 2020-02-25T09:51:10 | 241,872,163 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Read DBF files with Python.
Example:
>>> from dbfread import DBF
>>> for record in DBF('people.dbf'):
... print(record)
OrderedDict([('NAME', 'Alice'), ('BIRTHDATE', datetime.date(1987, 3, 1))])
OrderedDict([('NAME', 'Bob'), ('BIRTHDATE', datetime.date(1980, 11, 12))])
Full documentation at http://dbfread.readthedocs.org/
"""
__author__ = 'Ole Martin Bjorndalen'
__email__ = 'ombdalen@gmail.com'
__url__ = 'http://nerdly.info/ole/'
__license__ = 'MIT'
__version__ = '2.0.4'
from .dbf import DBF
from .deprecated_dbf import open, read
from .exceptions import *
from .field_parser import FieldParser, InvalidValue
# Prevent splat import.
__all__ = []
| UTF-8 | Python | true | false | 685 | py | 15 | __init__.py | 11 | 0.640876 | 0.616058 | 0 | 27 | 24.37037 | 78 |
lastmikoi/python-project-boilerplate | 558,345,782,955 | 470cd594f6df8c5ca5967f36434dd8451c4567e9 | 25848265586460ed8966bc6aa94efcd6cd220bd8 | /setup.py | a9bebbbe7d44e407307a39fc3bcc241987c8d315 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | https://github.com/lastmikoi/python-project-boilerplate | c4983d6451ebb7fa216257c7e148372ccd6ced15 | b58fe4dd19d8bd80b9c9be5c47f2bf95e8664153 | refs/heads/master | 2020-04-09T01:08:14.291870 | 2018-12-01T00:18:44 | 2018-12-01T00:33:13 | 159,893,115 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # --------------------------------License Notice----------------------------------
# Python Project Boilerplate - A boilerplate project for python packages
#
# Written in 2018 by Mickaël 'lastmikoi' FALCK <lastmikoi@lastmikoi.net>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
# --------------------------------License Notice----------------------------------
"""Setuptools-backed setup module."""
import codecs
import os
import setuptools
if __name__ == '__main__':
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
# Use README.rst as source for setuptools.setup's long_description param
with codecs.open(os.path.join(ROOT_DIR, 'README.rst'),
encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setuptools.setup(
# Distutils parameters
name='project_name',
description='Placeholder description',
long_description=LONG_DESCRIPTION,
author="Mickaël 'lastmikoi' FALCK",
author_email='lastmikoi@lastmikoi.net',
url='https://github.com/lastmikoi/python-project-boilerplate/',
packages=setuptools.find_packages(exclude=['tests']),
classifiers=[
'Programming Language :: Python :: 3',
],
license='CC0 1.0 Universal',
keywords='python project boilerplate skeleton template',
# Setuptools parameters
include_package_data=True,
install_requires=[
],
extras_require={
'dev': [
'ipython>=7.2.0,<8',
],
'test': [
'tox>=3.5.3,<4',
'pytest>=4.0.1,<5',
'pytest-mock>=1.10.0,<2',
],
},
python_requires='>=3.6,<4',
setup_requires=['setuptools_scm'],
# setuptools_scm parameters
use_scm_version=True,
)
| UTF-8 | Python | false | false | 2,189 | py | 3 | setup.py | 1 | 0.575674 | 0.561043 | 0 | 63 | 33.714286 | 82 |
Hybbon/era-scripts | 17,600,776,016,237 | 676f0a7c2703175af3d3d52b362c02f7032df406 | b4521a5e1b668bed9f253d222092313895753da5 | /recommenders/cofactor/run_cofactor.py | 8572452b65bf161a50553b45f7b6408001d7c76d | []
| no_license | https://github.com/Hybbon/era-scripts | 0b3bd41e76c11305803a193e8aa9830695285a1e | 405e8e77787d0ed641d9e31f5c4ac59ca7ecfd37 | refs/heads/master | 2021-03-19T14:25:49.913508 | 2017-12-12T16:46:39 | 2017-12-12T16:46:39 | 72,033,547 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import CoF_pre_process as pre_CoF
import Cofactorization as CoF
import argparse
import os
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("data", type=str,
help="path to the input files")
p.add_argument("-o'", "--output_dir", type=str, default="",
help="output directory (default: same as input file)")
p.add_argument("-p", "--part", type=str, default="u1",
help="partition used in the training")
p.add_argument("--proc_folder", type=str, default="pro",
help="folder containing the processed files used by CoFactor")
p.add_argument("-v", "--no_validation", action='store_true',
help="if specified, no validation folds are generated")
p.add_argument("-c", "--config", type=str, default="",
help="name of an override config file. (default: none)")
parsed_p = p.parse_args()
if parsed_p.output_dir == "":
parsed_p.output_dir = parsed_p.data
return parsed_p
if __name__ == '__main__':
args = parse_args()
print "-----------------------------"
if 'reeval' in args.data:
print(args)
args.no_validation = True
pre_CoF.run(args)
CoF.run(args) #pass args
os.system('rm -r {0}*'.format(os.path.join(args.data,args.proc_folder,args.part)))
| UTF-8 | Python | false | false | 1,317 | py | 57 | run_cofactor.py | 49 | 0.594533 | 0.593014 | 0 | 43 | 29.627907 | 86 |
tim-schilling/bettersimple | 19,043,885,026,310 | ce595cfac7cdf61cbe8624a54477012c9d22c617 | 853c6a1b6d17fd24c18875af7cc17d4750d4b9d7 | /charge/urls.py | e3fd480c5957ee9c66909d67147e518ffcda75fc | []
| no_license | https://github.com/tim-schilling/bettersimple | f0db73bfac07a0879229ed335d8462ed151dbbb7 | 19abbb72db2998f7e83874d905570e309686e854 | refs/heads/master | 2019-01-02T03:58:40.167777 | 2013-11-01T00:31:37 | 2013-11-01T00:31:37 | 13,758,820 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import patterns, include, url
from .views import edit_credit_card, billing, past_charge, stripe_webhook, process_monthly_charges
urlpatterns = patterns(
'charge.views',
url(r'^(?i)creditcard', edit_credit_card, name="edit_card_card"),
url(r'^(?i)billing', billing, name="billing"),
url(r'^(?i)past_charge/(\d{4})/(\d{2})/$', past_charge, name='past_charge'),
url(
r'^(?i)api/stripe_webhook/(?P<token>([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})+)/$',
stripe_webhook, name='stripe_webhook'),
url(
r'^(?i)api/process_monthly_charges/(?P<token>([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})+)/$',
process_monthly_charges, name='process_monthly_charges'),
)
| UTF-8 | Python | false | false | 797 | py | 71 | urls.py | 37 | 0.602258 | 0.559598 | 0 | 15 | 52.133333 | 136 |
pabloduque0/handwrittingOCR | 12,919,261,635,390 | 55c3fa3cfd889737e5fedec99dfda74f246f5e76 | 7ce026a15234d64eb278849a03a8f89548276373 | /SVM_OCR.py | 6828445a1295210ee4dc6b09799c40421611f311 | []
| no_license | https://github.com/pabloduque0/handwrittingOCR | ad97b2ff063b2f697dd2a2f8da5971ee42a0dd72 | e8c93d55ef24f56314092dcd70636db19d574702 | refs/heads/master | 2021-06-19T15:08:24.978778 | 2017-06-18T20:34:46 | 2017-06-18T20:34:46 | 94,634,671 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
# In this test we will use SVM instead of kNN
size=20
bin_n = 16 # Number of bins
svm_params = dict( kernel_type = cv2.SVM_LINEAR,
svm_type = cv2.SVM_C_SVC,
C=2.67, gamma=5.383 )
thisFlags = cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR
# First deskew image
def deskew(image):
myMoments = cv2.moments(image)
if abs(myMoments['mu02']) < 1e-2:
return image.copy()
skew = myMoments['mu11']/myMoments['mu02']
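    # Shear by the measured skew (the translation term keeps the digit
    # centred) so the stroke axis becomes vertical after warpAffine.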
M = np.float32([[1, skew, -0.5*size*skew], [0, 1, 0]])
image = cv2.warpAffine(image,M,(size, size),flags=thisFlags)
return image
# HOG function
def hog(image):
gx = cv2.Sobel(image, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(image, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
bins = np.int32(bin_n*ang/(2*np.pi)) # quantizing binvalues in (0...16)
bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:]
mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists) # hist is a 64-dimensional feature vector (4 cells x 16 bins)
return hist
def GaussianFilter(sigma):
halfSize = 3 * sigma
maskSize = 2 * halfSize + 1
mat = np.ones((maskSize,maskSize)) / (float)( 2 * np.pi * (sigma**2))
xyRange = np.arange(-halfSize, halfSize+1)
xx, yy = np.meshgrid(xyRange, xyRange)
x2y2 = (xx**2 + yy**2)
exp_part = np.exp(-(x2y2/(2.0*(sigma**2))))
mat = mat * exp_part
return mat
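# The mask samples exp(-(x^2+y^2)/(2*sigma^2)) on a grid of half-width
# 3*sigma, scaled by 1/(2*pi*sigma^2); the taps are not re-normalised to
# sum to one.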
image = cv2.imread('digits.png',0)
# If desired, image can be previously smoothed
gaussianFilter = GaussianFilter(1)
gaussianGray1 = cv2.filter2D(image, -1, gaussianFilter)
cells = [np.hsplit(row,100) for row in np.vsplit(image,50)]
# First half is trainData, remaining is testData
train_cells = [ i[:50] for i in cells ]
test_cells = [ i[50:] for i in cells]
# Training data
deskewedImage = [map(deskew,row) for row in train_cells]
hogData = [map(hog,row) for row in deskewedImage]
trainData = np.float32(hogData).reshape(-1,64)
dataResponses = np.float32(np.repeat(np.arange(10),250)[:,np.newaxis])
svm = cv2.SVM()
svm.train(trainData,dataResponses, params=svm_params)
svm.save('svm_data.dat')
# Testing data
deskewedImage = [map(deskew,row) for row in test_cells]
hogData = [map(hog,row) for row in deskewedImage]
testData = np.float32(hogData).reshape(-1,bin_n*4)
result = svm.predict_all(testData)
# Checking accuracy
mask = result==dataResponses
correct = np.count_nonzero(mask)
print correct*100.0/result.size | UTF-8 | Python | false | false | 2,586 | py | 4 | SVM_OCR.py | 3 | 0.649652 | 0.595901 | 0 | 84 | 29.797619 | 92 |
thecut/thecut-forms | 1,168,231,122,062 | 3bb10ecda4753c17da3104ea8a2775c7f1860d42 | 14479931c39afa19b345bd15dd7286e0a917615b | /thecut/forms/forms.py | f39685e0932ee56601187f36db3872232eec275d | [
"Apache-2.0"
]
| permissive | https://github.com/thecut/thecut-forms | d4c208a885428763ebb29345af8a2cd42e52851b | a0aefa87c67124eb033525b1d3d5dedcd0a3ed70 | refs/heads/master | 2021-01-17T07:58:15.822239 | 2017-05-23T02:34:27 | 2017-05-23T02:34:27 | 28,124,893 | 0 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from thecut.forms.utils import add_css_class
class EmailTypeMixin(object):
"""A mixin for a :py:class:`~django.forms.Form` that sets the HTML5
``email`` input type on any child :py:class:`~django.forms.EmailField`
instances."""
def __init__(self, *args, **kwargs):
super(EmailTypeMixin, self).__init__(*args, **kwargs)
# Set HTML5 input type for email fields
for field in self.fields.values():
if isinstance(field, forms.EmailField):
field.widget.input_type = 'email'
class RequiredMixin(object):
"""A mixin for a :py:class:`~django.forms.Form` that sets the HTML5
``required`` attribute on any child :py:class:`~django.forms.Field`
instances that is required.
This mixin does not apply the `required` attribute to fields using
:py:class:`~django.forms.RadioSelect` and
:py:class:`~django.forms.CheckboxSelectMultiple` as
the HTML5 ``required`` attribute does not behave as (usually) expected on
these widgets.
"""
required_css_class = 'required'
def __init__(self, *args, **kwargs):
super(RequiredMixin, self).__init__(*args, **kwargs)
# Set HTML5 required attributes. Note that if we set the required
# attribute on fields with certain widgets, it will cause the form to
# break by requiring EVERY option to be selected. This is not possible
# with the RadioSelect widget, and in most cases won't be the desired
# behaviour with the CheckboxSelectMultiple widget. If it is, the
# required attribute of the widget can still be set manually in the
# form.
for field in self.fields.values():
if field.required and not (
isinstance(field.widget, forms.CheckboxSelectMultiple) or
isinstance(field.widget, forms.RadioSelect)):
field.widget.attrs.update({'required': 'required'})
class MaxLengthMixin(object):
"""A mixin for a :py:class:`~django.forms.Form` that sets the HTML5
``maxlength`` attribute on any child :py:class:`~django.forms.Field`
instances using the :py:class:`~django.forms.Textarea` widget.
A ``max_length`` must be specified on the :py:class:`~django.forms.Field`.
"""
def __init__(self, *args, **kwargs):
super(MaxLengthMixin, self).__init__(*args, **kwargs)
# HTML5 maxlength attribute for textarea
for field in self.fields.values():
if isinstance(field.widget, forms.Textarea) and field.max_length:
field.widget.attrs.update({'maxlength': field.max_length})
class PlaceholderMixin(object):
"""A mixin for a :py:class:`~django.forms.Form` that allows you to easily set
the HTML5 ``placeholder`` widget on a child
:py:class:`~django.forms.Field`.
To add a ``placeholder`` to a :py:class:`~django.forms.Field`, specify it
in a ``placeholders`` ``dict`` on the :py:class:`~django.forms.Form`'s
:py:class:`~django.forms.Form.Meta` class. For example::
class MyForm(forms.Form):
foo = forms.CharField()
class Meta(object):
placeholders = {
'foo': 'Enter some text here.'
}
"""
class Meta(object):
placeholders = {}
def __init__(self, *args, **kwargs):
super(PlaceholderMixin, self).__init__(*args, **kwargs)
for key, value in dict(getattr(self.Meta, 'placeholders', {})).items():
self.fields[key].widget.attrs.update({'placeholder': value})
class TimeClassMixin(object):
"""A mixin for a :py:class:`~django.forms.Form` that adds a ``time`` CSS
class on any child :py:class:`~django.forms.Field` instances using the
    :py:class:`~django.forms.TimeInput` widget."""
def __init__(self, *args, **kwargs):
super(TimeClassMixin, self).__init__(*args, **kwargs)
# HTML5 input types and attributes
for field in self.fields.values():
if isinstance(field.widget, forms.TimeInput):
add_css_class(field.widget, 'time')
class DateClassMixin(object):
"""A mixin for a :py:class:`~django.forms.Form` that adds a ``date`` CSS
class on any child :py:class:`~django.forms.Field` instances using the
    :py:class:`~django.forms.DateInput` widget."""
def __init__(self, *args, **kwargs):
super(DateClassMixin, self).__init__(*args, **kwargs)
# HTML5 input types and attributes
for field in self.fields.values():
if isinstance(field.widget, forms.DateInput):
add_css_class(field.widget, 'date')
class DateTimeClassMixin(object):
"""A mixin for a :py:class:`~django.forms.Form` that adds a ``datetime`` CSS
class on any child :py:class:`~django.forms.Field` instances using the
    :py:class:`~django.forms.DateTimeInput` widget."""
def __init__(self, *args, **kwargs):
super(DateTimeClassMixin, self).__init__(*args, **kwargs)
# HTML5 input types and attributes
for field in self.fields.values():
if isinstance(field.widget, (forms.DateTimeInput,
forms.SplitDateTimeWidget)):
add_css_class(field.widget, 'datetime')
class DateTimeTimezoneMixin(object):
"""A mixin for a :py:class:`~django.forms.Form` that adds ``help_text``
to any child :py:class:`~django.forms.Field` instances using the
:py:class:`~django.forms.DateTimeInput` widget.
This help text contains the timezone for the field's recorded data (if
    any).
"""
def __init__(self, *args, **kwargs):
super(DateTimeTimezoneMixin, self).__init__(*args, **kwargs)
self._set_timezone_help_texts(data=self.initial)
def _set_timezone_help_texts(self, data):
for field_name, field in self.fields.items():
field_data = data.get(field_name)
if field_data and isinstance(field.widget, forms.DateTimeInput):
field.help_text = field_data.tzname()
def clean(self, *args, **kwargs):
cleaned_data = super(DateTimeTimezoneMixin, self).clean(*args,
**kwargs)
self._set_timezone_help_texts(data=cleaned_data)
return cleaned_data
class FormMixin(DateTimeClassMixin, DateClassMixin, EmailTypeMixin,
MaxLengthMixin, PlaceholderMixin, RequiredMixin,
TimeClassMixin):
"""Form mixin.
Used to extend a standard Django :py:class:`~django.forms.Form` class with
useful/common behaviour.
"""
error_css_class = 'error'
label_suffix = ''
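# Hypothetical usage (sketch only; ``ContactForm`` is not part of this
# package):
#     class ContactForm(FormMixin, forms.Form):
#         email = forms.EmailField()
#         message = forms.CharField(widget=forms.Textarea, max_length=500)
# Rendered fields then carry HTML5 ``required``, ``maxlength`` and
# ``type="email"`` attributes without per-form boilerplate.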
| UTF-8 | Python | false | false | 6,774 | py | 16 | forms.py | 6 | 0.628137 | 0.626366 | 0 | 176 | 37.488636 | 81 |
briantoe/MachineLearning | 6,682,969,144,018 | a31e013bfa7c909cd189ecef4ee6ad9c3c2a38ed | 75b2d28b683033e50c9ed5407412fa98748db9e4 | /ps2/problem3/problem1.py | ee896f43cfade2cb376c714c0fac58ba550b7497 | []
| no_license | https://github.com/briantoe/MachineLearning | 361cc4934649c14a70cf6c77ed2e1791a8ef9d9e | adbe85c1b45e38796464b50d7b39f28276846683 | refs/heads/master | 2020-07-18T10:16:34.420312 | 2019-11-23T06:12:54 | 2019-11-23T06:12:54 | 206,228,354 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
class Question:
def __init__(self, col, val):
self.col = col
self.val = val
def match(self, inputt): # puts given input into the question and evaulates boolean value
val = inputt[self.col]
return val == self.val
class Leaf:
def __init__(self, rows):
self.prediction = class_counts(rows)
class DecisionNode:
def __init__(self, question, true_branch, false_branch):
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
def read_data(filename):
data = []
with open(filename) as file:
for line in file:
data_line = [item.replace('\n', '') for item in line.split(',')]
data.append(data_line)
# data = np.array(data)
return data
# labels = data[:, 0] # grab labels
# dim = len(data[0]) # dimension of the data
# data = data[:, 1:dim] # grab vectors
# return (data, labels)
def unique_values(rows, cols):
return set([row[cols] for row in rows])
def find_best_split(rows):
best_gain = 0
best_question = None
uncertain = gini(rows)
    feat_num = len(rows[0]) - 1
for col in range(feat_num):
values = unique_values(rows, col)
for val in values:
question = Question(col,val)
true_rows, false_rows = partition(rows, question)
if len(true_rows) == 0 or len(false_rows) == 0: # does not divide dataset
continue
gain = information_gain(true_rows, false_rows, uncertain)
            if gain > best_gain:
best_gain = gain
best_question = question
return best_gain, best_question
def class_counts(rows):
counts = {}
for item in rows:
label = item[0] # grab the label off of the datapoint
if label not in counts: # this counts the amount of data per classifier, stores in a dict
# to reference for each classifier
counts[label] = 0
counts[label] += 1
return counts
def information_gain(left,right, cur_uncertain):
p = float(len(left))/ (len(left) + len(right))
return cur_uncertain - p * gini(left) - (1-p) * gini(right)
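# Gini impurity of a set of rows: 1 - sum_i p_i^2, where p_i is the fraction
# of rows carrying class label i (0 means pure, higher means more mixed).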
def gini(rows):
    counts = class_counts(rows)
    impurity = 1
    for lbl in counts:
        prob_of_lbl = counts[lbl] / float(len(rows))
        impurity -= prob_of_lbl**2
    return impurity
def partition(rows, question):
true_rows, false_rows = [], []
for row in rows:
        if question.match(row):
true_rows.append(row)
else:
false_rows.append(row)
return true_rows, false_rows
def build_tree(rows):
gain, question = find_best_split(rows)
if gain == 0:
return Leaf(rows)
true_rows, false_rows = partition(rows, question)
true_branch = build_tree(true_rows)
false_branch = build_tree(false_rows)
return DecisionNode(question, true_branch, false_branch)
def classify(row, node):
    if isinstance(node, Leaf):
return node.prediction
if node.question.match(row):
return classify(row, node.true_branch)
else:
return classify(row, node.false_branch)
def main():
# attribute_types = ['cap-shape', 'cap-surface', 'cap-color', 'bruises?', 'odor', 'gill-attachment', 'gill-spacing', 'gill-size', 'gill-color', 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring', 'stalk-surface-below-ring', 'stalk-color-above-ring', 'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number', 'ring-type', 'spore-print-color', 'population', 'habitat']
filename = "mush_train.data"
labels_and_data = read_data(filename)
# print(str(labels_and_data))
filename = "mush_test.data"
test_labels_and_data = read_data(filename)
print(str(test_labels_and_data))
# tree = build_tree(labels_and_data[0])
# print(classify(labels_and_data[0], tree))
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 4,028 | py | 45 | problem1.py | 23 | 0.596822 | 0.592602 | 0 | 145 | 26.77931 | 385 |
bornhack/bornhack-website | 14,096,082,666,200 | 33c5d76fb5eae9abb7c316ca0bed722c5446615e | 716e1f229eab5b5087752ebf32c3370a87958798 | /src/shop/migrations/0060_productcategory_weight.py | 3791ddf24dddd32ff917e723cd05c669a00c9a88 | [
"BSD-3-Clause"
]
| permissive | https://github.com/bornhack/bornhack-website | 2e8810e00f7a48a8a7e82f02f66d67edcce6e404 | 767deb7f58429e9162e0c2ef79be9f0f38f37ce1 | refs/heads/master | 2023-08-31T23:31:04.344829 | 2023-08-08T21:59:19 | 2023-08-08T21:59:19 | 43,702,500 | 9 | 43 | BSD-3-Clause | false | 2023-09-11T09:46:14 | 2015-10-05T17:45:55 | 2023-08-08T18:32:10 | 2023-09-11T09:46:09 | 177,417 | 7 | 28 | 93 | Python | false | false | # Generated by Django 2.2.3 on 2019-12-16 22:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("shop", "0059_auto_20190718_2051"),
]
operations = [
migrations.AddField(
model_name="productcategory",
name="weight",
field=models.IntegerField(
default=100,
help_text="Sorting weight. Heavier items sink to the bottom.",
),
),
]
| UTF-8 | Python | false | false | 501 | py | 876 | 0060_productcategory_weight.py | 545 | 0.558882 | 0.491018 | 0 | 21 | 22.857143 | 78 |
simplehonestwork/django-clortho | 18,425,409,704,394 | fe7ceda58157e9d8c4fbc51166ced98f952faca5 | 098cd0dcf98ad157804c60dc2b50b0e5772b9173 | /clortho/auth_backends.py | 35ef08556e564eb4d9d32ba23fe0397502d145d3 | []
| no_license | https://github.com/simplehonestwork/django-clortho | 9347575bf5c49689f078c7bc6431ef76fa5c786d | 11a006b627900f4b0b70a0ea870d6256ef0da9d5 | refs/heads/master | 2021-01-17T22:20:15.132489 | 2011-11-10T21:39:37 | 2011-11-10T21:39:37 | 2,743,111 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
from django.contrib.auth.models import User
from django.conf import settings
import facebook
from clortho.models import Facebook
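# Django authentication backend that resolves the Facebook JS-SDK cookie
# ("fbs_" + app id) into a local User, creating the User and its Facebook
# profile row on first login.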
class FacebookBackend:
def authenticate(self, cookies):
APP_ID = settings.FACEBOOK_APP_ID
SECRET_KEY = settings.FACEBOOK_SECRET_KEY
NS = settings.FACEBOOK_USER_NAMESPACE
try:
access_token = facebook.get_user_from_cookie(cookies, APP_ID, SECRET_KEY)
token_not_expired = datetime.fromtimestamp(float(access_token['expires'])) > datetime.now()
if 'fbs_' + APP_ID in cookies and token_not_expired:
graph = facebook.GraphAPI(access_token['access_token'])
user_info = graph.get_object('me')
try:
facebook_data = Facebook.objects.get(uid=user_info['id'])
return facebook_data.user
except Facebook.DoesNotExist:
try:
email = user_info['email']
except:
email = user_info['id'] + '@dummyfbemail.com'
user = User.objects.create(username=NS + user_info['id'],
email=email)
user.first_name = user_info['first_name']
user.last_name = user_info['last_name']
user.save()
# New users get an unusable password.
if settings.FACEBOOK_SET_UNUSABLE_PASSWORD:
user.set_unusable_password()
facebook_data = Facebook(uid=user_info['id'],
url=user_info['link'], user=user)
facebook_data.save()
return user
else:
return None
except:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except:
return None | UTF-8 | Python | false | false | 2,040 | py | 11 | auth_backends.py | 8 | 0.511275 | 0.511275 | 0 | 50 | 39.82 | 103 |
django-stars/cfb | 10,187,662,464,299 | 90a0fe87e219d50f322aa3049399972051541089 | 1f8f002b1f9bba24dad226578b3a931693850fd2 | /tests/test_header.py | 8d6a8c7f2afb3a47ab42964ea5136d15de18829a | [
"BSD-2-Clause"
]
| permissive | https://github.com/django-stars/cfb | 408c5413f0164b96395af238861c505382bcd70c | afe2a0af7ac6dd99c4c2cea730ca1c34707bbc16 | refs/heads/master | 2021-01-21T17:06:50.462633 | 2016-04-26T08:52:34 | 2016-04-26T08:52:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from os import SEEK_END, SEEK_SET
from six import b, BytesIO
from unittest import TestCase
from cfb.exceptions import MaybeDefected, ErrorDefect, FatalDefect, \
WarningDefect
from cfb.header import Header
class SourceMock(BytesIO, MaybeDefected):
def __init__(self, value="", raise_if=ErrorDefect):
super(SourceMock, self).__init__(b(value))
MaybeDefected.__init__(self, raise_if=raise_if)
def append(self, data):
self.write(b(data))
self.seek(0)
return self
def erase(self, till=0):
self.seek(till, SEEK_END if till < 0 else SEEK_SET)
self.truncate(self.tell())
return self
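# SourceMock doubles as an in-memory CFB stream: append()/erase() mutate the
# underlying bytes so each assertion below feeds Header a progressively more
# complete (or deliberately corrupted) header.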
class HeaderTestCase(TestCase):
def test_main(self):
source = SourceMock(raise_if=WarningDefect)
self.assertRaises(FatalDefect, Header, source)
self.assertRaises(FatalDefect, Header, source.append("12345678"))
self.assertRaises(
ErrorDefect, Header,
source.erase().append("\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1"))
self.assertRaises(ErrorDefect, Header, source.append('1' * 16))
self.assertRaises(FatalDefect, Header,
source.erase(-16).append('\0' * 16))
self.assertRaises(ErrorDefect, Header,
source.append("1234567890"))
self.assertRaises(WarningDefect, Header,
source.erase(-10).append("12\x04\x00345678"))
self.assertRaises(FatalDefect, Header,
source.erase(-10).append("\x3e\x00\x04\x00123456"))
self.assertRaises(ErrorDefect, Header,
source.erase(-6).append("\xfe\xff1234"))
self.assertRaises(ErrorDefect, Header,
source.erase(-4).append("\x09\x0012"))
self.assertRaises(
ErrorDefect, Header,
source.erase(-8).append("\x03\x00\xfe\xff\x0c\x0012"))
self.assertRaises(ErrorDefect, Header,
source.erase(-4).append("\x09\x0012"))
self.assertRaises(ErrorDefect, Header,
source.erase(-2).append("\x06\x00"))
self.assertRaises(ErrorDefect, Header, source.append("1" * 6))
self.assertRaises(FatalDefect, Header,
source.erase(-6).append('\0' * 6))
self.assertRaises(ErrorDefect, Header,
source.append("1234" + '\0' * 32))
self.assertRaises(ErrorDefect, Header,
source.erase(-36).append('\0' * 36))
self.assertEqual(
Header(source.erase(-20).append('\x00\x10' + '\0' * 18)).version,
(3, 0x3e))
| UTF-8 | Python | false | false | 2,669 | py | 15 | test_header.py | 13 | 0.581117 | 0.532784 | 0.01124 | 72 | 36.069444 | 77 |
avogis/messenger | 11,244,224,431,301 | af43795b4d27c96e0745d7207b5ea6d6c048b287 | 7d65a982c99b49d0727aa346de103196f127e8d5 | /tests/tests.py | 8275262bc197fcb2b19c96cc7cad3e372fec7af4 | []
| no_license | https://github.com/avogis/messenger | 1451c2cbec5612787a29233ab8debf4040808d02 | cfe90005cdfe6df0fd6763fcc8271beaa752c41b | refs/heads/master | 2020-03-28T20:05:51.218719 | 2017-06-20T18:53:58 | 2017-06-20T18:53:58 | 94,607,518 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
import json
from messenger.api import User, Message, user_dict, messages_dict
class TestModelToDict(object):
def test_usr_dict(self):
timestamp = datetime.utcnow()
user = User('test', 'l@a', created=timestamp, last_checked=timestamp)
dict_user = user_dict(user)
expected_dict = {
"username": 'test',
"email": 'l@a',
"created": str(timestamp),
"last_checked": str(timestamp),
}
assert dict_user == expected_dict
def test_message_to_dict(self):
user = User('test', 'l@a',)
created = datetime.utcnow()
message = Message('hej', user, created=created)
dict_message = messages_dict([message])
expected_dict = {
message.id: {
"text": 'hej',
"created": str(created)
},
}
assert dict_message == expected_dict
def test_messages_to_dict(self, sqldb):
user = User('test', 'l@a')
created = datetime.utcnow()
message = Message('hej', user, created=created)
messages = [message, message, message]
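        # NOTE: the repeated message.id keys below collapse into a single
        # dict entry, mirroring messages_dict(), which also keys by id.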
expected_dict = {
message.id: {
"text": 'hej',
"created": str(created)
},
message.id: {
"text": 'hej',
"created": str(created)
},
message.id: {
"text": 'hej',
"created": str(created)
}
}
dict_message = messages_dict(messages)
assert dict_message == expected_dict
class TestMessageApi(object):
def test_get_user_does_not_exist(self, app):
response = app.get('/messages/test/1')
assert response.status_code == 404
def test_get_message_does_not_exist(self, app, sqldb):
user = User('test', 'test@gmail.com')
sqldb.session.add(user)
sqldb.session.commit()
response = app.get('/messages/test/1')
assert response.status_code == 404
def test_get_message(self, app, sqldb):
user = User('test', 'test@gmail.com')
created = datetime.utcnow()
message = Message('Hi', user, created=created)
sqldb.session.add(user)
sqldb.session.add(message)
sqldb.session.commit()
response = app.get('/messages/test/1')
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == '{"1": {"text": "Hi", "created": "' + str(created) + '"}}'
def test_delete_message_user_does_not_exist(self, app):
response = app.delete('/messages/test/1')
assert response.status_code == 404
def test_message_does_not_exist(self, app, sqldb):
user = User('test', 'test@gmail.com')
sqldb.session.add(user)
sqldb.session.commit()
response = app.delete('/messages/test/1')
assert response.status_code == 404
def test_delete_message(self, app, sqldb):
user = User('test', 'test@gmail.com')
message = Message('Hi', user)
sqldb.session.add(user)
sqldb.session.add(message)
sqldb.session.commit()
assert Message.query.get(message.id) is not None
response = app.delete('/messages/test/{}'.format(message.id))
assert response.status_code == 204
assert Message.query.get(message.id) is None
def test_delete_message_no_such_user(self, app, sqldb):
response = app.delete('/messages/test/1')
assert response.status_code == 404
class TestMessages(object):
def test_get_user_messages(self, app, sqldb):
user = User('test', 'test@gmail.com')
created = datetime.utcnow()
message1 = Message('Hi', user, created=created)
message2 = Message('Where are you', user, created=created)
sqldb.session.add(user)
sqldb.session.add(message1)
sqldb.session.add(message2)
sqldb.session.commit()
response = app.get('/messages/test')
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == ('{"1": {"text": "Hi", "created": "' + str(created) + '"}, ' +
'"2": {"text": "Where are you", "created": "' + str(created) + '"}}')
def test_get_user_messages_does_not_exist(self, app, sqldb):
response = app.get('/messages/test')
assert response.status_code == 404
def test_post_new_message_for_a_user(self, app, sqldb):
user = User('test', 'test@gmail.com')
sqldb.session.add(user)
sqldb.session.commit()
message_text = 'Hello'
        response = app.post('/messages/test', data=message_text)
assert response.status_code == 201
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict.get("message_id") == 1
def test_post_new_message_for_a_user_does_not_exist(self, app, sqldb):
message_text = 'Do you exist?'
response = app.post('/messages/test', data=message_text)
assert response.status_code == 404
class TestMessageOffset(object):
def test_start_not_int(self, app):
response = app.get('/messages/test/offset/hej/1')
assert response.status_code == 404
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == "Stop and start indices must be integers"
def test_stop_not_int(self, app):
response = app.get('/messages/test/offset/1/hej')
assert response.status_code == 404
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == "Stop and start indices must be integers"
def test_start_bigger_than_stop(self, app):
response = app.get('/messages/test/offset/5/1')
assert response.status_code == 404
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == "Stopp index must be greater than start index"
def test_no_user(self, app):
response = app.get('/messages/test/offset/1/5')
assert response.status_code == 404
def test_no_messages_for_user(self, app, sqldb):
user = User('test', 'test@gmail.com')
sqldb.session.add(user)
sqldb.session.commit()
response = app.get('/messages/test/offset/1/5')
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == "No messages for user test"
def test_get_messages(self, app, sqldb):
user = User('test', 'test@gmail.com')
sqldb.session.add(user)
sqldb.session.commit()
created = datetime.utcnow()
message1 = Message('Hi1', user, created=created)
sqldb.session.add(message1)
message2 = Message('Hi2', user, created=created)
sqldb.session.add(message2)
message3 = Message('Hi3', user, created=created)
sqldb.session.add(message3)
sqldb.session.commit()
expected_dict = {
str(message1.id): {
"text": 'Hi1',
"created": str(created)
},
str(message2.id): {
"text": 'Hi2',
"created": str(created)
}
}
response = app.get('/messages/test/offset/0/2')
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == expected_dict
expected_dict = {
str(message2.id): {
"text": 'Hi2',
"created": str(created)
},
str(message3.id): {
"text": 'Hi3',
"created": str(created)
}
}
response = app.get('/messages/test/offset/1/3')
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == expected_dict
class TestNewMessages(object):
def test_user_does_not_exist(self, app):
response = app.get('/new_messages/test')
assert response.status_code == 404
def test_no_new_messages_for_a_user(self, app, sqldb):
user = User('test', 'test@gmail.com')
sqldb.session.add(user)
sqldb.session.commit()
response = app.get('/new_messages/test')
resp = response.data.decode('utf-8')
resp_dict = json.loads(resp)
assert resp_dict == "No new messages for user test"
| UTF-8 | Python | false | false | 8,522 | py | 7 | tests.py | 5 | 0.566299 | 0.555151 | 0 | 262 | 31.526718 | 98 |
Yash-Patel01/mysite | 14,379,550,513,320 | c5285c340754e0425c5901bcf9793923ca3e5170 | 9a0f815030ff9721f1a82ffd2498470976546324 | /project/views.py | 52edfdd4089a4b310602c86d359a817c90e2de50 | []
| no_license | https://github.com/Yash-Patel01/mysite | 838cb8904411ed7f5566d72ff87ae2b1c905427e | c415045c4b8f8cf90fa08fde36b1263a6b713eff | refs/heads/master | 2022-11-26T15:16:18.540708 | 2020-03-28T04:51:20 | 2020-03-28T04:51:20 | 248,917,233 | 1 | 1 | null | false | 2022-11-22T05:24:58 | 2020-03-21T06:05:12 | 2020-03-28T04:51:23 | 2022-11-22T05:24:56 | 8,882 | 1 | 1 | 2 | CSS | false | false | from django.shortcuts import render
from .models import course, registration, contact, blog1, comment2, coursecomment
from django.contrib.auth.models import User
from django.http import HttpResponse
import datetime
import urbandictionary as ud
from mysite.settings import EMAIL_HOST_USER
from django.core.mail import send_mail
def index(request):
# word = str(input('Enter the word :'))
cour = course.objects.all()
# defs = ud.define(word)
# # rand = ud.random()
# for i in defs:
# print(i.definition)
return render(request, 'index.html', {'cour': cour})
def blog(request):
blogg = blog1.objects.all()
return render(request, "blog.html", {'blogg': blogg})
def contect1(request):
return render(request, "contact.html")
def course1(request, id):
course_id1 = ''
user_id11 = ''
user_id1 = ''
count1 = 'y'
current_user = request.user
registration1 = registration.objects.filter(user_id=current_user.id, course_id=id)
registration2 = registration.objects.all()
for i in registration2:
user_id2 = i.user_id
course_id2 = i.course_id
for i in registration1:
user_id11 = i.user_id
# course_id1 = i.course_id
# print(course_id1)
# print(user_id1)
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
message = request.POST['message']
id = request.POST['id']
Comment = coursecomment(name=name, email=email,
message=message, idofcourse=id)
Comment.save()
Comment = coursecomment.objects.all()
Count = coursecomment.objects.filter(idofcourse=id).count()
cour = course.objects.get(id=id)
return render(request, 'course.html', {'cour': cour, 'comment': Comment,'count1': count1,'count': Count, 'registration1': registration1, 'registration2': registration2, 'user_id11': user_id11})
def message(request):
id1 = ''
if request.method == 'POST':
id1 = request.POST['id']
contact.objects.filter(id=id1).delete()
mes = contact.objects.all()
return render(request, 'message.html', {'mes': mes})
def contect11(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
message = request.POST['message']
Contect = contact(name=name, email=email,
subject=subject, message=message)
Contect.save()
return render(request, "contact.html")
def addblog(request):
if request.method == 'POST':
title = request.POST['title']
name = request.POST['name']
blog = request.POST['blog']
datetime.date.today()
# username = request.user.get_short_name()
Blog = blog1(title=title, name=name, blog=blog)
Blog.save()
return render(request, "addblog.html")
else:
return render(request, "addblog.html")
def blogpost(request, blog):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
message = request.POST['message']
id = request.POST['id']
Comment = comment2(name=name, email=email,
message=message, idofblog=id)
Comment.save()
blogg = blog1.objects.get(id=blog)
Count = comment2.objects.filter(idofblog=blog).count()
comment = comment2.objects.all()
return render(request, 'blog-post.html', {"blog": blogg, "comment": comment, 'count': Count})
def search(request):
name = ''
if request.method == 'POST':
name = request.POST['name']
search1 = User.objects.filter(first_name=name)
return render(request, 'search.html', {'search': search1})
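# register1 enrols the posted user in course `id11` and then emails the
# course details to the enrolled address via Django's send_mail (SMTP
# settings come from mysite.settings / EMAIL_HOST_USER).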
def register1(request, id11):
id1 = ''
id2 = id11
user1 = ''
email = ''
if request.method == 'POST':
id1 = request.POST['id']
course_id = id2
status = True
user1 = User.objects.filter(id=id1)
for i in user1:
username = i.username
first_name = i.first_name
last_name = i.last_name
email = i.email
is_staff = i.is_staff
user_id = i.id
if not registration.objects.filter(user_id=user_id, username=username, first_name=first_name,last_name=last_name, email=email, is_staff=is_staff, course_id=course_id, status=status).exists():
Registration = registration(user_id=user_id, username=username, first_name=first_name,last_name=last_name, email=email, is_staff=is_staff, course_id=course_id, status=status)
Registration.save()
current_user = request.user
registration1 = registration.objects.filter(user_id=current_user.id)
for i in registration1:
email = i.email
subject = 'Welcome to Edusite'
course_details = course.objects.filter(id=id2)
for i in course_details:
title = i.title
name = i.name
start_date = i.startdate
end_date = i.enddate
        message = '''Dear Subscriber,
	I am Yash Kathrotiya. You are enrolled in an Edusite course. Please check your enrollment status on the course page. We hope you will enjoy the course tutorials provided by Edusite.
	'''
        message += '\n\t Name of the course is : ' + title
        message += '\n\t Name of the Faculty is : ' + name
        message += '\n\t Starting Date is : ' + str(start_date)
        message += '\n\t Ending Date is : ' + str(end_date)
        message += " \n\n\t\t***** DO NOT REPLY TO THIS EMAIL *****"
        recipient = email
        # print(message)
        send_mail(subject, message, EMAIL_HOST_USER, [recipient], fail_silently=False)
print("****************************** SUCCESS ******************************")
# return render(request, 'index.html')
return render(request, "register1.html")
else:
print('course_id :'+id2)
return render(request, 'register1.html', {'course_id': id2})
def dictionary(request):
yash =''
word =''
if request.method == 'POST':
word = request.POST['word']
yash =ud.define(word)
return render(request,'dictionary.html',{'word':yash,'word1':word})
| UTF-8 | Python | false | false | 6,408 | py | 20 | views.py | 16 | 0.603464 | 0.59176 | 0 | 187 | 33.26738 | 199 |
Leezr8023/try | 8,581,344,661,778 | 1151bd00675b48ebc5b812635357b7ef480fe1ea | 20cbaebf8778829b45ec8652a42ea6afd23d244c | /test.py | 8cdb49328d63c9f31096a7666dc543fcf747140c | []
| no_license | https://github.com/Leezr8023/try | c52ee5e072d5319c7d9fa30ab7d442ee87771bf3 | fa4d4b586476714c8f3273c75540dabc33c3f1c5 | refs/heads/master | 2020-12-05T07:59:19.757814 | 2020-02-06T02:21:41 | 2020-02-06T02:21:41 | 232,053,012 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import time
from ui_new import Ui_MainWindow
from Algorithm import qtoutput
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
from PyQt5.QtGui import QFont
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from mythread import myThread
from matplotlib.figure import Figure
from const import exitFlag,data
global exitFlag
class ApplicationWindow(QtWidgets.QMainWindow,Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
dynamic_canvas = FigureCanvas(Figure(figsize=(5, 3)))
self.verticalLayout.addWidget(dynamic_canvas)
self.addToolBar(QtCore.Qt.BottomToolBarArea,
NavigationToolbar(dynamic_canvas, self))
# self.textEdit.setFont(Qfont(14))#("Consolas", 14)
self.textEdit.setFont(QFont("Consolas", 14))
self.pushButton.clicked.connect(self.MeasureThread)
self.pushButton_2.clicked.connect(self.output)
self.pushButton_3.clicked.connect(self.opensetting)
self._dynamic_ax = dynamic_canvas.figure.subplots()
self._timer = dynamic_canvas.new_timer(
100, [(self._update_canvas, (), {})])
def output(self):
str = "{:.2f}"
str = str.format(qtoutput())
self.textEdit.setText(str)
# self.textEdit.setText(str(qtoutput()))
def opensetting(self):
print("槽函数触发成功")
pass
def MeasureThread(self):
print("123")
self.pushButton.setEnabled(False)
thread1 = myThread(1, "Thread-1", 1)
# thread2 = myThread(2, "Thread-2", 1)
self._timer.start()
thread1.start()
print(exitFlag[0])
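    # _update_canvas is driven by the Matplotlib canvas timer: each tick it
    # re-plots the shared `data` buffer filled by myThread and stops the
    # timer once the worker sets exitFlag.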
def _update_canvas(self):
global exitFlag
if exitFlag[0]==1:
self._timer.stop()
self.pushButton.setEnabled(True)
self._dynamic_ax.clear()
global data
self._dynamic_ax.plot(range(1600), data[...,0])#data[...,1]
self._dynamic_ax.set_xlim(0, 1600)
self._dynamic_ax.set_ylim(0.0008, 0.00145)
self._dynamic_ax.figure.canvas.draw()
# if __name__ == "__main__":
# qapp = QtWidgets.QApplication(sys.argv)
# app = ApplicationWindow()
# app.show()
# qapp.exec_()
| UTF-8 | Python | false | false | 2,333 | py | 68 | test.py | 7 | 0.632169 | 0.608021 | 0 | 70 | 32.128571 | 69 |
pl77/redditPostArchiver | 4,423,816,351,520 | ef60654e3fdba364bec586b439319e15eb926ebe | 3b58ac6e6035968f30b45f92efd67c2395649da6 | /postids.py | 092301fbea8eb9df265ec3dff348700a80847ece | []
| no_license | https://github.com/pl77/redditPostArchiver | c493a9da78b8596bf820c9f234f7188295e2805f | 2dcac4e736883c792c6a4966554db9cae2cf84bc | refs/heads/master | 2022-08-06T23:46:50.505488 | 2022-07-22T15:11:13 | 2022-07-22T15:11:13 | 119,421,291 | 67 | 11 | null | true | 2022-07-22T15:11:14 | 2018-01-29T18:13:30 | 2022-06-11T22:18:39 | 2022-07-22T15:11:13 | 143 | 47 | 9 | 3 | Python | false | false | #!/usr/bin/env python3
import os
import sys
import arrow
import praw
import requests
import yaml
from prawcore.exceptions import RequestException, NotFound
from requests.exceptions import HTTPError
"""
Customization Configuration
"""
# Default post_id: #
username = 'GallowBoob'
# Path to which to output the file #
# output_file_path = './'
# The Path to the stylesheet, relative to where the html file will be stored #
path_to_css = 'css/style.css'
"""
Reddit Post Archiver
By Samuel Johnson Stoever
"""
if len(sys.argv) == 1:
print('No username was provided. Using default username.')
elif len(sys.argv) > 2:
print('Too Many Arguments. Using default username.')
else:
username = sys.argv[1]
username = username.rstrip('/')
if '/u/' in username:
username = username.split('/u/')[1]
elif '/user/' in username:
username = username.split('/user/')[1]
print('Processing all posts submitted by', username)
cred_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'credentials.yml')
credentials = yaml.load(open(cred_path), Loader=yaml.SafeLoader)
r = praw.Reddit(client_id=credentials['client_id'],
client_secret=credentials['client_secret'],
user_agent=credentials['user_agent'])
def get_user_post_id_set(user, first_id, postcount):
user_post_id_set = set()
if first_id is not None:
params = dict(after=first_id, count=postcount)
postgenerators = user.submissions.new(params=params)
else:
postgenerators = user.submissions.new()
try:
for post in postgenerators:
post_id = "{}\n".format(post.id)
user_post_id_set.add(post_id)
postcount += 1
if postgenerators.yielded == 100:
try:
first_id = postgenerators.params['after']
except KeyError:
first_id = None
except NotFound:
print('User not found with Reddit API. Most likely deleted.')
return user_post_id_set, first_id, postcount
def get_reddit_submissions(reddituser):
try:
user = r.redditor(reddituser)
except HTTPError:
print('Unable to write post ids: Invalid username or login credentials')
return
first_id = None
postcount = 0
try:
user_post_id_set, first_id, postcount = get_user_post_id_set(user, first_id, postcount)
except RequestException:
return
post_id_set = user_post_id_set
subnumber = len(user_post_id_set)
print("Received", subnumber, "posts from", reddituser)
totalsubnumber = subnumber
while subnumber > 99:
try:
user_post_id_set, first_id, postcount = get_user_post_id_set(user, first_id, postcount)
except RequestException:
break
subnumber = len(user_post_id_set)
totalsubnumber += subnumber
post_id_set |= user_post_id_set
print("Received additional", subnumber, "posts from Reddit for", reddituser, " - Total posts received so far:",
totalsubnumber, "with", len(post_id_set), "in set.")
return post_id_set
def get_push_submissions(reddituser):
push_post_id_set = set()
now = int(arrow.utcnow().timestamp())
linktemplate = "https://api.pushshift.io/reddit/search/submission/?author={author}" \
"&before={timestamp}&sort=desc&size=500"
url = linktemplate.format(author=reddituser, timestamp=now)
rp = requests.get(url)
push = rp.json()
earliest = now
subnumber = len(push['data'])
totalsubnumber = 0
print("Received", subnumber, "pushshift.io posts from", reddituser)
while subnumber > 0:
totalsubnumber += subnumber
itemlist = push['data']
push['data'] = list()
for item in itemlist:
if item['created_utc'] < earliest:
earliest = item['created_utc']
post_id = "{}\n".format(item['id'])
push_post_id_set.add(post_id)
url = linktemplate.format(author=reddituser, timestamp=earliest)
rp = requests.get(url)
push = rp.json()
subnumber = len(push['data'])
print("Received additional", subnumber, "posts from", reddituser, " - Total posts received so far:",
totalsubnumber, "with", len(push_post_id_set), "in pushshift.io set.")
return push_post_id_set
def main():
reddit_post_id_set = get_reddit_submissions(username)
push_post_id_set = get_push_submissions(username)
post_id_set = reddit_post_id_set.union(push_post_id_set)
print("Total posts submitted by", username, "in set:", len(post_id_set))
filedate = int(arrow.now().timestamp())
basedir = "/rpa" if os.environ.get('DOCKER', '0') == '1' else '.'
output_file_path = "{basedir}/{username}_{timestamp}.txt".format(basedir=basedir, username=username, timestamp=filedate)
with open(output_file_path, 'w', encoding='UTF-8') as post_file:
post_file.writelines(post_id_set)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 5,017 | py | 15 | postids.py | 9 | 0.635041 | 0.630855 | 0 | 147 | 33.129252 | 124 |
frankliumin/MemeRep | 1,297,080,147,461 | 24bab2a1655c6c89a14221762871f36af94c3f1e | 0b0af4c236f8f25dc54916bfe79dcc60323bea09 | /MemeRep/cal_fit.py | 711899233ed093ed05e8f92016632a9e58879bff | []
| no_license | https://github.com/frankliumin/MemeRep | 2d48f13103ee31215e4c04d0f553417458353203 | ecee6e882870cdcad976f427c7ef3c030de0a21d | refs/heads/master | 2022-11-15T07:53:41.905472 | 2020-07-07T08:58:21 | 2020-07-07T08:58:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 16:18:25 2020
@author: Administrator
"""
import numpy as np
from sklearn.cluster import KMeans
import random
import itertools
import time
def cal_fit(x1, adjData):
lamda = 0.5
x = x1[0:len(x1)]
k = len(list(set(x)))
label = list(set(x))
KKM = 0
RC = 0
for i in range(k):
numi = list(x).count(label[i])
temp = [index1 for index1 in range(len(x)) if x[index1] == label[i]] # 找到类别为label[i]的下标
all_com = list(itertools.combinations(temp, 2)) # 找出该社团内所有点的下标再进行组合
L1 = 0
for index2 in all_com:
if (adjData[index2[0]][index2[1]] == 1):
L1 = L1 + 1
degree_i = 0
for j in temp:
degree_i = degree_i + sum(adjData[j])
L2 = degree_i - 2 * L1
RC = RC + L2 / numi
KKM = KKM - L1 / numi
fit = -(2 * lamda * KKM + 2 * (1 - lamda) * RC)
return fit | UTF-8 | Python | false | false | 994 | py | 10 | cal_fit.py | 8 | 0.532839 | 0.486229 | 0 | 39 | 23.230769 | 96 |
KalePrajwal/PythonDB | 8,220,567,448,649 | 22af280f97ae5175e7fe03be44e6f19b51a8c7ff | 307ea510630d841f1ac44ebdeab43bb1879810d7 | /Search_Book.py | 4d2b1751d82ed6d6185dc21bcf04b8f3859fb115 | []
| no_license | https://github.com/KalePrajwal/PythonDB | 06526919d6b2f7b9c1d1a9a11f2ff868eb01f296 | 663e5a346f04e00c62f0f4113f40fec6765991c0 | refs/heads/main | 2023-09-04T12:14:14.474174 | 2021-11-08T14:42:37 | 2021-11-08T14:42:37 | 425,726,238 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mysql.connector as my
con = my.connect(host='bplx7zjluj8ztqdment0-mysql.services.clever-cloud.com', user='utdc4xugroxopi4q', password='l3A4aV1qVd3bMPBHITBG', database='bplx7zjluj8ztqdment0')
curs=con.cursor()
try:
id = int(input('Enter Book id : '))
curs.execute("Select * from Books where Book_Id = %d" %id)
data = curs.fetchall()
if data:
print()
print('Book Id : %d' %id)
print('Book Name : %s' %data[0][1])
print('Category : %s' %data[0][2])
print('Author : %s' %data[0][3])
print('Publication : %s' %data[0][4])
print('Edition : %s' %data[0][5])
print('Price : %.2f' %data[0][6])
else:
print()
print('Book not found')
except:
print()
print('Invalid input')
print()
con.close() | UTF-8 | Python | false | false | 851 | py | 8 | Search_Book.py | 7 | 0.547591 | 0.518214 | 0 | 28 | 28.464286 | 168 |
fiskercui/testlanguage | 4,587,025,110,945 | 15444854f40d9a9e1ed887769ad68895895b2f82 | bb72b975267b12fb678248ce565f3fd9bd6153ee | /testpython/testjson.py | a6274232e1d4815b4b70ac3e229f674bd21943f7 | []
| no_license | https://github.com/fiskercui/testlanguage | cfbcc84ffaa31a535a7f898d6c7b42dcbf2dc71b | b15746d9fa387172b749e54a90e8c6251750f192 | refs/heads/master | 2021-05-30T13:26:11.792822 | 2016-01-13T02:03:26 | 2016-01-13T02:03:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
print json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
print json.dumps("\"foo\bar")
print json.dumps(u'\u1234')
print json.dumps('\\')
print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
from StringIO import StringIO
io = StringIO()
json.dump(['streaming API'], io)
print "hello world"
print json.JSONEncoder().encode({"foo": ["bar", "baz"]})
print io.getvalue()
print("----------")
io = StringIO('["streaming API"]')
print json.load(io)
import types
import urllib2
import json
duan = "--------------------------"
def registerUrl():
try:
url ="http://m.weather.com.cn/data/101010100.html"
data = urllib2.urlopen(url).read()
return data
except Exception,e:
print e
def jsonFile(fileData):
file = open("d:\json.txt","w")
file.write(fileData)
file.close()
def praserJsonFile(jsonData):
value = json.loads(jsonData)
rootlist = value.keys()
print rootlist
print duan
for rootkey in rootlist:
print rootkey
print duan
subvalue = value[rootkey]
print subvalue
print duan
for subkey in subvalue:
print subkey,subvalue[subkey]
if __name__ == "__main__":
# xinput = raw_input()
# x = 130
# xvalue = cmp(x,xinput)
# print xvalue
# print x/100.0
# data = open('test.json', 'r')
# print json.load(data)
with open("test.json") as json_file:
json_data = json.load(json_file)
print(json_data)
# data = []
# with open('test.json') as f:
# for line in f:
# data.append(json.loads(line))
# from pprint import pprint
# json_data=open('./test.json')
# print json_data
# data = json.load(json_data)
# pprint(data)
# json_data.close()
# print data
# data = registerUrl()
# jsonFile(data)
# praserJsonFile(data)
| UTF-8 | Python | false | false | 1,722 | py | 101 | testjson.py | 44 | 0.633566 | 0.615563 | 0 | 83 | 19.710843 | 58 |
kurtejung/PurdueForest | 16,999,480,565,501 | eaacd7b6b7b06b152ca3e694521a5c77ba2f95c0 | 21fb99baaaeaed674cd99fb1a5d39ea54a1db827 | /HLTrigger/HLTanalyzers/python/HLTopen_cff.py | 69902c047a0726383b4e84bb8ec7e023b7c5985c | []
| no_license | https://github.com/kurtejung/PurdueForest | e413d4bc953df386a50d01ca09d1b648d07a926c | 9c7de9e7452b0837a872cfdd428244cb46b55322 | refs/heads/master | 2020-05-16T20:32:30.896495 | 2014-06-02T18:42:18 | 2014-06-02T18:42:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import FWCore.ParameterSet.Config as cms
# import the whole HLT menu
from HLTrigger.HLTanalyzers.HLT_FULL_cff import *
hltL1IsoR9shape = cms.EDProducer( "EgammaHLTR9Producer",
recoEcalCandidateProducer = cms.InputTag( "hltL1IsoRecoEcalCandidate" ),
ecalRechitEB = cms.InputTag( 'hltEcalRegionalEgammaRecHit','EcalRecHitsEB' ),
ecalRechitEE = cms.InputTag( 'hltEcalRegionalEgammaRecHit','EcalRecHitsEE' ),
useSwissCross = cms.bool( False )
)
hltL1NonIsoR9shape = cms.EDProducer( "EgammaHLTR9Producer",
recoEcalCandidateProducer = cms.InputTag( "hltL1NonIsoRecoEcalCandidate" ),
ecalRechitEB = cms.InputTag( 'hltEcalRegionalEgammaRecHit','EcalRecHitsEB' ),
ecalRechitEE = cms.InputTag( 'hltEcalRegionalEgammaRecHit','EcalRecHitsEE' ),
useSwissCross = cms.bool( False )
)
HLTEgammaR9ShapeSequence = cms.Sequence( hltL1IsoR9shape + hltL1NonIsoR9shape )
hltMuTrackJpsiPixelTrackSelector.MinMasses = cms.vdouble ( 2.0, 60.0 )
hltMuTrackJpsiPixelTrackSelector.MaxMasses = cms.vdouble ( 4.6, 120.0 )
hltMu5Track1JpsiPixelMassFiltered.MinMasses = cms.vdouble ( 2.0, 60.0 )
hltMu5Track1JpsiPixelMassFiltered.MaxMasses = cms.vdouble ( 4.6, 120.0 )
hltMu5TkMuJpsiTrackMassFiltered.MinMasses = cms.vdouble ( 2.5, 60.0 )
hltMu5TkMuJpsiTrackMassFiltered.MaxMasses = cms.vdouble ( 4.1, 120.0 )
hltMu5Track2JpsiTrackMassFiltered.MinMasses = cms.vdouble ( 2.7, 60.0 )
hltMu5Track2JpsiTrackMassFiltered.MaxMasses = cms.vdouble ( 3.5, 120.0 )
hltMu5L2Mu2JpsiTrackMassFiltered.MinMasses = cms.vdouble ( 1.8, 50.0 )
hltMu5L2Mu2JpsiTrackMassFiltered.MaxMasses = cms.vdouble ( 4.5, 130.0 )
#hltLowMassDisplacedL3Filtered.MaxEta = cms.double(3.0)
#hltLowMassDisplacedL3Filtered.MinPtPair = cms.double( 0.0 )
#hltLowMassDisplacedL3Filtered.MinPtMin = cms.double( 0.0 )
#hltLowMassDisplacedL3Filtered.MaxInvMass = cms.double( 11.5 )
#
#hltDisplacedmumuFilterLowMass.MinLxySignificance = cms.double( 0.0 )
#hltDisplacedmumuFilterLowMass.MinVtxProbability = cms.double( 0.0 )
#hltDisplacedmumuFilterLowMass.MinCosinePointingAngle = cms.double( -2.0 )
#HLTDisplacemumuSequence = cms.Sequence( hltL1sL1DoubleMu0 + hltDimuonL1Filtered0 + hltDimuonL2PreFiltered0 + hltLowMassDisplacedL3Filtered + hltDisplacedmumuVtxProducerLowMass + hltDisplacedmumuFilterLowMass)
# create the jetMET HLT reco path
DoHLTJets = cms.Path(HLTBeginSequence +
HLTBeginSequence +
HLTRecoJetSequenceAK5Corrected +
HLTRecoJetSequenceAK5L1FastJetCorrected +
HLTRecoMETSequence +
HLTDoLocalHcalWithoutHOSequence
)
DoHLTJetsU = cms.Path(HLTBeginSequence +
HLTBeginSequence +
HLTRecoJetSequenceAK5Uncorrected +
HLTRecoMETSequence
)
# create the muon HLT reco path
DoHltMuon = cms.Path(
HLTBeginSequence +
HLTL2muonrecoSequenceNoVtx +
HLTL2muonrecoSequence +
HLTL2muonisorecoSequence +
HLTL3muonrecoSequence +
HLTL3muonisorecoSequence +
HLTL3muonTkIso10recoSequence +
HLTMuTrackJpsiPixelRecoSequence +
HLTMuTrackJpsiTrackRecoSequence +
## HLTDisplacemumuSequence +
HLTDoLocalPixelSequence +
hltPixelTracks +
HLTDoLocalStripSequence +
hltMuTrackSeeds +
hltMuCkfTrackCandidates +
hltMuCtfTracks +
hltDiMuonMerging +
HLTL3muonrecoNocandSequence +
hltDiMuonLinks +
hltGlbTrkMuons +
hltGlbTrkMuonCands +
HLTEndSequence )
# create the Egamma HLT reco paths
DoHLTPhoton = cms.Path(
HLTBeginSequence +
HLTDoRegionalEgammaEcalSequence +
HLTL1IsolatedEcalClustersSequence +
HLTL1NonIsolatedEcalClustersSequence +
hltL1IsoRecoEcalCandidate +
hltL1NonIsoRecoEcalCandidate +
HLTEgammaR9ShapeSequence +
HLTEgammaR9IDSequence +
hltL1IsolatedPhotonEcalIsol +
hltL1NonIsolatedPhotonEcalIsol +
hltL1IsolatedPhotonHcalIsol +
hltL1NonIsolatedPhotonHcalIsol +
HLTDoLocalPixelSequence +
HLTDoLocalStripSequence +
hltL1IsoEgammaRegionalPixelSeedGenerator +
hltL1IsoEgammaRegionalCkfTrackCandidates +
hltL1IsoEgammaRegionalCTFFinalFitWithMaterial +
hltL1NonIsoEgammaRegionalPixelSeedGenerator +
hltL1NonIsoEgammaRegionalCkfTrackCandidates +
hltL1NonIsoEgammaRegionalCTFFinalFitWithMaterial +
hltL1IsolatedPhotonHollowTrackIsol +
hltL1NonIsolatedPhotonHollowTrackIsol +
HLTEcalActivitySequence +
hltActivityPhotonHcalForHE +
hltActivityR9ID +
hltActivityPhotonClusterShape +
hltActivityPhotonEcalIsol +
hltActivityPhotonHcalIsol +
HLTEcalActivityEgammaRegionalRecoTrackerSequence +
hltEcalActivityEgammaRegionalAnalyticalTrackSelector +
hltActivityPhotonHollowTrackIsolWithId
## hltActivityPhotonHollowTrackIsol
)
DoHLTElectron = cms.Path(
HLTBeginSequence +
HLTDoRegionalEgammaEcalSequence +
HLTL1IsolatedEcalClustersSequence +
HLTL1NonIsolatedEcalClustersSequence +
hltL1IsoRecoEcalCandidate +
hltL1NonIsoRecoEcalCandidate +
HLTEgammaR9ShapeSequence +#was commented out for HT jobs
HLTEgammaR9IDSequence +
hltL1IsoHLTClusterShape +
hltL1NonIsoHLTClusterShape +
hltL1IsolatedPhotonEcalIsol +
hltL1NonIsolatedPhotonEcalIsol +
HLTDoLocalHcalWithoutHOSequence +
hltL1IsolatedPhotonHcalForHE +
hltL1NonIsolatedPhotonHcalForHE +
hltL1IsolatedPhotonHcalIsol +
hltL1NonIsolatedPhotonHcalIsol +
HLTDoLocalPixelSequence +
HLTDoLocalStripSequence +
hltL1IsoStartUpElectronPixelSeeds +
hltL1NonIsoStartUpElectronPixelSeeds +
hltCkfL1IsoTrackCandidates +
hltCtfL1IsoWithMaterialTracks +
hltPixelMatchElectronsL1Iso +
hltCkfL1NonIsoTrackCandidates +
hltCtfL1NonIsoWithMaterialTracks +
hltPixelMatchElectronsL1NonIso +
hltElectronL1IsoDetaDphi +
hltElectronL1NonIsoDetaDphi +
HLTL1IsoEgammaRegionalRecoTrackerSequence +
HLTL1NonIsoEgammaRegionalRecoTrackerSequence +
hltL1IsoElectronTrackIsol +
hltL1NonIsoElectronTrackIsol +
hltHFEMClusters +
hltHFRecoEcalTightCandidate
)
# create the tau HLT reco path
from HLTrigger.HLTanalyzers.OpenHLT_Tau_cff import *
DoHLTTau = cms.Path(HLTBeginSequence +
OpenHLTCaloTausCreatorSequence +
openhltL2TauJets +
openhltL2TauIsolationProducer +
HLTDoLocalPixelSequence +
HLTRecopixelvertexingSequence +
OpenHLTL25TauTrackReconstructionSequence +
OpenHLTL25TauTrackIsolation +
TauOpenHLT+
HLTRecoJetSequencePrePF +
HLTPFJetTriggerSequence +
HLTPFJetTriggerSequenceForTaus +
HLTPFTauSequence +
HLTEndSequence)
# create the b-jet HLT paths
from HLTrigger.HLTanalyzers.OpenHLT_BJet_cff import *
DoHLTBTag = cms.Path(
HLTBeginSequence +
OpenHLTBLifetimeL25recoSequence +
OpenHLTBSoftMuonL25recoSequence +
OpenHLTBLifetimeL3recoSequence +
OpenHLTBSoftMuonL3recoSequence +
HLTEndSequence )
DoHLTAlCaPi0Eta1E31 = cms.Path(
HLTBeginSequence +
hltL1sAlCaEcalPi0Eta +
HLTDoRegionalPi0EtaSequence +
HLTEndSequence )
DoHLTAlCaPi0Eta8E29 = cms.Path(
HLTBeginSequence +
hltL1sAlCaEcalPi0Eta +
HLTDoRegionalPi0EtaSequence +
HLTEndSequence )
DoHLTAlCaECALPhiSym = cms.Path(
HLTBeginSequence +
hltEcalRawToRecHitFacility + hltESRawToRecHitFacility + hltEcalRegionalRestFEDs + hltEcalRecHitAll +
HLTEndSequence )
DoHLTMinBiasPixelTracks = cms.Path(
HLTBeginSequence +
HLTDoLocalPixelSequence +
HLTDoHILocalPixelSequence +
HLTPixelTrackingForHITrackTrigger +
hltPixelCandsForHITrackTrigger +
hltPixelTracks +
hltPixelVertices)
| UTF-8 | Python | false | false | 8,066 | py | 122 | HLTopen_cff.py | 75 | 0.72849 | 0.704066 | 0 | 215 | 36.511628 | 210 |
impiyush83/slambook | 11,699,490,929,656 | 9e6b08fe369787f6b5ffcef866d2389f8212c523 | bba538f0246c01407b16fcf990df5e2af0d93cfc | /slambook_restful/resource/customer_homepage.py | 683292c7246b425b6916470b5c2d4c82d3aff13c | []
| no_license | https://github.com/impiyush83/slambook | 7218f6ed9e3905dcf8531744984c58bba8a9827f | 1c3cbb9a6e69199a3978dd51734dd617d1200f4e | refs/heads/master | 2022-12-10T23:49:40.962475 | 2019-07-07T13:50:53 | 2019-07-07T13:50:53 | 142,017,369 | 0 | 0 | null | false | 2019-10-24T19:25:28 | 2018-07-23T13:21:46 | 2019-10-24T19:23:24 | 2019-10-24T19:25:27 | 10,014 | 0 | 0 | 0 | CSS | false | false | from flask import current_app as app, render_template, make_response
from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_restful import Resource
from constants.common_constants import headers
from slambook_restful.models.Friend.friend import Friend
from slambook_restful.models.Secret.secret import Secret
from slambook_restful.models.User.user import User
from slambook_restful.utils.resource_exceptions import exception_handle
from slambook_restful.views.secret_key import get_secret_key
class CustomerHomepage(Resource):
decorators = [jwt_required, exception_handle]
def __init__(self):
app.logger.info('In the constructor of {}'.format(self.__class__.__name__))
def get(self):
"""
.. http:get:: /user/home
This api will be used to render homepage
**Example request**:
.. sourcecode:: http
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
}
:statuscode 200: json
:statuscode 400: value error
"""
user_id = get_jwt_identity()
user = User.with_id(user_id)
friends = Friend.get_friends_with_email_address(user.id)
is_secret_key_created = Secret.is_secret_key_created(user.email)
secret_key = get_secret_key(is_secret_key_created)
return make_response(
render_template('homepage.html', friends=friends, is_secret_key=is_secret_key_created,
len=len(friends), secret_key=secret_key),
headers)
| UTF-8 | Python | false | false | 1,638 | py | 30 | customer_homepage.py | 23 | 0.647741 | 0.641026 | 0 | 57 | 27.736842 | 98 |
loum/pyrob | 15,015,205,680,592 | 07693ac6d4e671c7fa59e508295a03382acaea1c | fbe55b7d87d11faa7911a23cc78df02bc6af5406 | /pyrob/schema/ipact_tran/zabbix/zbx_device_energy.py | fe9e29f2ce9dac378bef77988a2f639fdacc1768 | []
| no_license | https://github.com/loum/pyrob | 463e25fba2fa6ee1f6da8f256fd42d09a7fc88bd | 551ebdfc449fb565d7f324297887401ec553f6c5 | refs/heads/master | 2020-07-31T12:08:30.095751 | 2019-09-24T12:34:39 | 2019-09-24T12:34:39 | 210,598,588 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """IPACT_TRAN T_TRN_ZBX_DEVICE_ENERGY table class mapping.
"""
from sqlalchemy import Column, String
import sqlalchemy.types
import pyrob.table
import pyrob.schema.types
class ZbxDeviceEnergy(pyrob.table.Common, pyrob.table.Base):
__tablename__ = 'T_TRN_ZBX_DEVICE_ENERGY'
__table_args__ = {'schema': 'IPACT_TRAN'}
physical_name = Column(String(25), primary_key=True, nullable=False)
energy_consumed = Column(pyrob.schema.types.Float)
battery_temperature = Column(pyrob.schema.types.Float)
mains_voltage = Column(pyrob.schema.types.Float)
time_captured_utc = Column(pyrob.schema.types.OraDateParser,
primary_key=True,
nullable=False)
battery_current = Column(pyrob.schema.types.Float)
load_current = Column(pyrob.schema.types.Float)
output_voltage = Column(pyrob.schema.types.Float)
rectifier_current = Column(pyrob.schema.types.Float)
ipact_dl_process_id = Column(pyrob.schema.types.Int)
record_created_userid = Column(pyrob.schema.types.Int)
record_created_date = Column(pyrob.schema.types.OraDateParser)
@sqlalchemy.ext.hybrid.hybrid_property
def source_table_details(self):
return {
self.__tablename__: (
'PHYSICAL_NAME',
'TIME_CAPTURED_UTC',
'ENERGY_CONSUMED',
)
}
@sqlalchemy.ext.hybrid.hybrid_property
def source_table_order_by(self):
return (
'PHYSICAL_NAME',
'TIME_CAPTURED_UTC',
)
@sqlalchemy.ext.hybrid.hybrid_property
def source_table_date_range_col(self):
return 'TIME_CAPTURED_UTC'
@sqlalchemy.event.listens_for(ZbxDeviceEnergy, 'before_insert')
def gen_default(mapper, connection, instance):
if instance.energy_consumed is None:
instance.energy_consumed = instance.output_voltage * instance.rectifier_current * 0.25
| UTF-8 | Python | false | false | 1,931 | py | 682 | zbx_device_energy.py | 487 | 0.659762 | 0.657172 | 0 | 55 | 34.109091 | 94 |
Naught0/qtbot | 14,001,593,405,335 | 075c185debf3d3aa830895906adaa860d768ab95 | 5616e441a3bd34cb46ab211ce625609b4cebca3c | /cogs/news.py | dcbc1b8f7c718352870054505a6b8b4e54793ed6 | [
"MIT"
]
| permissive | https://github.com/Naught0/qtbot | 08a55ae758f8f42611cdc186fc37749ec9d3e646 | d5824aebc4892b3fbe24c2d053dbb4052c57b5ae | refs/heads/master | 2023-06-24T15:51:22.505671 | 2023-06-15T19:19:41 | 2023-06-15T19:45:12 | 93,383,576 | 9 | 9 | MIT | false | 2023-05-23T00:28:18 | 2017-06-05T08:43:16 | 2023-02-20T17:32:04 | 2023-05-23T00:28:18 | 5,100 | 3 | 1 | 2 | Python | false | false | import json
import asyncio
import discord
from urllib.parse import quote
from dateutil.parser import parse
from datetime import datetime, timedelta
from utils import aiohttp_wrap as aw
from discord.ext import commands
class News(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.redis_client = bot.redis_client
self.aio_session = bot.aio_session
self.uri = "http://api.mediastack.com/v1/news"
with open("data/apikeys.json") as f:
self.api_key = json.load(f)["news"]
@staticmethod
def date_range() -> str:
return f"{(datetime.today() - timedelta(days=5)).isoformat().split('T')[0]},{datetime.today().isoformat().split('T')[0]}"
@staticmethod
def json_to_embed(json_dict: dict) -> discord.Embed:
em = discord.Embed()
em.title = json_dict["title"]
em.description = json_dict["description"]
em.url = json_dict["url"]
# This field is empty sometimes -> handle it
if json_dict["image"]:
em.set_thumbnail(url=json_dict["image"])
em.set_footer(text=json_dict["source"])
em.timestamp = parse(json_dict["published_at"])
return em
@commands.command(name="news")
async def get_news(
self,
ctx: commands.Context,
*,
query: commands.clean_content(escape_markdown=True) = None,
):
"""Get the latest and greatest news or optionally search for some specific news stories"""
# Add Emojis for navigation
emoji_tup = tuple(f"{x}\U000020e3" for x in range(1, 10))
em_dict = {}
params = (
{
"keywords": quote(query),
"languages": "en",
"limit": 9,
"date": self.date_range(),
"sort": "popularity",
"categories": "-sports,-entertainment,-business",
"access_key": self.api_key,
}
if query
else {
"languages": "en",
"limit": 9,
"date": self.date_range(),
"sort": "popularity",
"categories": "-sports,-entertainment,-business",
"countries": "us,gb",
"access_key": self.api_key,
}
)
redis_key = f"news:{query}" if query else "news"
if await self.redis_client.exists(redis_key):
raw_json_string = await self.redis_client.get(redis_key)
raw_json_dict = json.loads(raw_json_string)
article_list = raw_json_dict["data"]
for idx, article in enumerate(article_list[:9]):
em_dict[emoji_tup[idx]] = self.json_to_embed(article)
else:
api_response = await aw.aio_get_json(
self.aio_session,
self.uri,
params=params,
)
if api_response is None:
return await ctx.error(
"API error",
description="Something went wrong with that request. Try again later.",
)
article_list = api_response["data"]
if len(article_list) == 0:
return await ctx.error(
"No articles found",
description=f"Couldn't find any news on `{query}`",
)
await self.redis_client.set(redis_key, json.dumps(api_response), ex=10 * 60)
for idx, article in enumerate(article_list):
em_dict[emoji_tup[idx]] = self.json_to_embed(article)
bot_message = await ctx.send(embed=em_dict[emoji_tup[0]])
for emoji in emoji_tup[: len(article_list)]:
await bot_message.add_reaction(emoji)
def check(reaction, user):
return user == ctx.author and reaction.emoji in emoji_tup and reaction.message.id == bot_message.id
while True:
try:
reaction, user = await self.bot.wait_for("reaction_add", check=check, timeout=30.0)
except asyncio.TimeoutError:
return await bot_message.clear_reactions()
if reaction.emoji in em_dict:
await bot_message.edit(embed=em_dict[reaction.emoji])
await bot_message.remove_reaction(reaction.emoji, ctx.author)
def setup(bot):
bot.add_cog(News(bot))
| UTF-8 | Python | false | false | 4,396 | py | 59 | news.py | 51 | 0.544586 | 0.538672 | 0 | 131 | 32.557252 | 129 |
fcea/scrape_InflFore_Lid | 4,604,204,973,838 | 7855838359b0713de7ad99f8485eb617a5f8f706 | 5954f1181b3654cbfdaea011326013af769ea012 | /Lider_Webscraper_Alimentos.py | f6c3aa3ec4eec743e5baa3a22efd69bd5884d51a | []
| no_license | https://github.com/fcea/scrape_InflFore_Lid | 7db2873ed9256db5c268f6d5acb0f35438d9c0bd | 1e8dc8f3a976d485d73bb7a2172b5ed1a04e927b | refs/heads/master | 2021-01-22T20:43:54.928600 | 2013-08-28T05:46:30 | 2013-08-28T05:46:30 | 11,716,225 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import csv
from math import ceil
import datetime
import re
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import datetime
from bs4 import UnicodeDammit
import MySQLdb as mdb
import time
import sys
import random
#METODO QUE CREA LA TABLA SQL EN CASO DE QUE NO EXISTA
def create_Table_Productos(name):
con = mdb.connect('localhost', 'root', 'password', 'LiderWebscraping');
with con:
cur = con.cursor()
#cur.execute("DROP TABLE IF EXISTS "+tableName)
cur.execute("CREATE TABLE IF NOT EXISTS "+name+"(SKU VARCHAR(50),\
Categoria VARCHAR(200) ,\
Cod_Categoria INT ,\
Marca VARCHAR(200) ,\
Producto VARCHAR(1000) ,\
FechaEjecucion VARCHAR(20) ,\
Disponibilidad INT ,\
PrecioProducto INT ,\
PrecioGranel INT ,\
PrecioNormal INT)")
con.close()
#METODO QUE COORDINA EL SCRAPING DE TODO EL SITIO WEB
#RECORRE LOS LINKS, DETERMINA LOS QUE SIRVEN.
#DE LOS QUE SIRVEN VERIFICA CUALES AUN NO HAN SIDO VISITADOS
#HACE LA REVISION Y DESPUES LLAMA A LA FUNCION QUE INSERTA EN LA TABLA
def lets_Scrape_Supermarket(url, fecha_ejec, tableName):
driver = webdriver.Firefox()
driver.get(url)
superRef = driver.find_element_by_xpath('/html/body/div[3]/div[3]/ul/li/a')
superRef.click()
links = driver.find_elements_by_css_selector(".cursorpointer.alertCheck")
linksTmp =[]
nombresTmp = []
cont=0
for ref in links:
temp = ref.get_attribute("outerHTML").encode('utf-8').strip()
#BGood y Experto en la Parrilla ya fueron guardados y corresponden a vistas especiales que lider genera para reordenar productos existentes
#"Huggies" y "Vinos y Mas" son categorias no visitadas pero que tienen una vista distinta. En este caso entonces se procede a generar / inputar el link directamente.
if ref.get_attribute("innerHTML").encode('utf-8').strip() != "B Good" and ref.get_attribute("innerHTML").encode('utf-8').strip() != "Experto en la Parrilla" and temp.find("categoryFood") == -1 and temp.find("specialCategoryFood") == -1 and temp.find("CAT_GM") == -1:
if ref.get_attribute("innerHTML").encode('utf-8').strip() == "Huggies":
linksTmp.append('http://www.lider.cl/dys/catalog/product/productList.jsp?id=cat870020')
elif ref.get_attribute("innerHTML").encode('utf-8').strip()[:9] == "Vinos y M":
linksTmp.append('http://www.lider.cl/dys/catalog/product/productList.jsp?id=CF_Nivel3_000159')
else:
linksTmp.append("http://www.lider.cl" + get_Link(temp))
nombresTmp.append(str(cont) + "|" + ref.get_attribute("innerHTML").encode('utf-8').strip())
cont = cont+1
#driver.close()
cont = 0
#busco id categoria del link mas alta ingresada, asigno el valor al contador e itero desde ese punto
cont = data_Inserted(tableName)
while cont < len(linksTmp):
print "Ejecuto es " + nombresTmp[cont]
resuPart = get_info_Supermarket(driver, linksTmp[cont], fecha_ejec, nombresTmp[cont], tableName) #recupero 7 elementos a insertar, falta la fecha, categoria y cod_cate, pero estan ya aca
cont = cont + 1
driver.close()
#METODO QUE SIRVE PARA CAMBIAR EL FORMATO DE LOS NUMEROS QUE RECUPERO DESDE LIDER
#RETORNA LA FECHA SIN SIGNOS $ NI PUNTOS
def modify_Prices(formPrice):
form1 = formPrice.replace(".","")
form2 = form1.replace("$","")
return form2
#METODO QUE SIRVE PARA VERIFICAR EL ULTIMO DATO INSERTADO EN LA TABLA SQL
#RETORNA 0 SI LA TABLA ESTA VACIA. DE LO CONTRARIO RETORNA EL CODIGO MAS ALTO INGRESADO.
def data_Inserted(tableName):
con = mdb.connect('localhost', 'root', 'password', 'LiderWebscraping');
with con:
cur = con.cursor()
cur.execute("SELECT Cod_Categoria FROM " +tableName)
#Esto me hace pasar la lista a un arreglo
row = [item[0] for item in cur.fetchall()]
con.close()
try:
maxim = max(row)
return maxim
except ValueError:
return 0
#METODO AUXILIAR PARA DEJAR EL LINK CON EL FORMATO APROPIADO
#RETORNA EL LINK CON TRIMS
def get_Link(unfText):
st = unfText.find('lang')+6
nd = unfText.find('"', unfText.find('lang')+6)
return unfText[st:nd]
#METODO QUE HACE EL INSERT DENTRO DE LA TABLA SQL
def word_to_SQL_insertion(concatString, tableName):
con = mdb.connect('localhost', 'root', 'password', 'LiderWebscraping');
with con:
cur = con.cursor()
if cur.execute("SELECT * FROM "+tableName+" WHERE SKU=%s",concatString.split("|")[0]) == 0:
cur.execute("INSERT INTO "+tableName+" VALUES (%s,%s,%s,%s, %s,%s,%s,%s,%s, %s)",(concatString.split("|")[0],concatString.split("|")[1],int(concatString.split("|")[2]), concatString.split("|")[3],concatString.split("|")[4],concatString.split("|")[5],int(concatString.split("|")[6]),int(concatString.split("|")[7]),int(concatString.split("|")[8]),int(concatString.split("|")[9])))
else:
#print 'Nada que hacer con ' + str(concatString.split("|")[0])
pass
con.close()
def get_Seven_Data(ele1):
#Obtengo el SKU. No hay metodo eficiente, solo busqueda en string
auxText = ele1.get_attribute("outerHTML").encode('utf-8').strip()
skuProd = auxText[auxText.find("skuId=")+6:auxText.find("&",auxText.find("skuId=")) ]
#Saco ahora la informacion de los precios y otros. Aprovecho la estructura del texto.
info = ele1.find_elements_by_tag_name("a")
#info[0] -> data inutil
#info[1] -> marca
#info[2] -> detalle
#info[3] -> carro de compras
#info[4] -> carro de compras
#info[5] -> solo los productos que estan disponibles
#De este modo, obtengo la marca y el detalle
try:
marcaDirt = info[1].get_attribute("innerHTML").encode('utf-8').strip()
exist =1
except:
exist = 0
if exist == 1:
marcaProd = marcaDirt[marcaDirt.find(">")+1:marcaDirt.find("<",marcaDirt.find(">"))]
detalleProd = info[2].get_attribute("innerHTML").encode('utf-8').strip()
#La existencia de la clase bloqueada me indica si el producto esta actualmente disponible.
try:
availInd = ele1.find_element_by_class_name("ech_form_disabledDiv").get_attribute("innerHTML").encode('utf-8').strip()
availInd = 0
except:
availInd = 1
try:
tiposPrecios = ele1.find_elements_by_class_name("retail")
if len(tiposPrecios)==0: #significa que hay solo precio primario
mainPrice = modify_Prices(ele1.find_element_by_class_name("price").get_attribute("innerHTML").encode('utf-8').strip())
granelPrice = modify_Prices(ele1.find_element_by_class_name("price").get_attribute("innerHTML").encode('utf-8').strip())
normalPrice = modify_Prices(ele1.find_element_by_class_name("price").get_attribute("innerHTML").encode('utf-8').strip())
elif len(tiposPrecios)==1: #significa que hay precio primario y precio granel
auxPrice=tiposPrecios[0].get_attribute("innerHTML").encode('utf-8').strip()
mainPrice = modify_Prices(ele1.find_element_by_class_name("price").get_attribute("innerHTML").encode('utf-8').strip())
granelPrice = modify_Prices(re.findall('\${1}\s*[\.0-9]{1,10}\s*', auxPrice)[0]).strip()
normalPrice = modify_Prices(re.findall('\${1}\s*[\.0-9]{1,10}\s*', auxPrice)[0]).strip()
elif len(tiposPrecios)==2: #significa que hay precio primario, granel y normal (ergo el primario esta en oferta)
#el precio no lo puedo sacar directamente, tengo que hacer un parser manual sobre el texto outer completo
hiddenPrice = ele1.get_attribute("outerHTML").encode('utf-8').strip().split('<small class="retail">')
maskedPrice = tiposPrecios[1].get_attribute("outerHTML").encode('utf-8').strip()
mainPrice = modify_Prices(ele1.find_element_by_class_name("price").get_attribute("innerHTML").encode('utf-8').strip())
granelPrice = modify_Prices(re.findall('\${1}\s*[\.0-9]{1,10}\s*', maskedPrice)[0]).strip()
normalPrice = modify_Prices(hiddenPrice[1][hiddenPrice[1].find('$'):].strip())
else: #caso no determinado
print 'Hay un caso que no se esta capturando!'
sys.exit(0)
except: #significa que no hay ningun tag con ese nombre
mainPrice = modify_Prices(ele1.find_element_by_class_name("price").get_attribute("innerHTML").encode('utf-8').strip())
granelPrice = modify_Prices(ele1.find_element_by_class_name("price").get_attribute("innerHTML").encode('utf-8').strip())
normalPrice = modify_Prices(ele1.find_element_by_class_name("price").get_attribute("innerHTML").encode('utf-8').strip())
return [skuProd , marcaProd, detalleProd, availInd, mainPrice, granelPrice, normalPrice]
else:
return [0]
#METODO QUE RECUPERA LA INFORMACION DESDE LA PAGINA (HACE EL WEBSCRAPE )
#RETORNA 7 PARAMETROS DE INFORMACION AL METODO PRINCIPAL. VAN SEPARADOS POR "|" PARA SPLITEAR DESPUES
def get_info_Supermarket(driver, urls, fecha_ejec, doubleParam, tableName):
#genero la url correcta con todos los elementos cargados
#driver = webdriver.Firefox()
driver.get(urls)
longit = driver.find_element_by_class_name('pages-shopping').find_element_by_class_name('info')
numUnclean = longit.get_attribute("innerHTML").encode('utf-8').strip().split('<strong>')[2]
numElem = numUnclean[:numUnclean.find('<')]
urrs = urls[:urls.find('&')]+ '&pageSize=' + str(numElem) + '&goToPage=1'
#tengo la url nueva correcta
driver.get(urrs)
#Recupero informacion
element2 = driver.find_elements_by_class_name('prod_referencia') #sku
element3 = driver.find_elements_by_class_name('price') #precio rojo
#captura la informacion de todos los productos no featured
elementemp = driver.find_elements_by_css_selector('.product.ech_prod') #recupero la marca, detalle y si tiene stock.
#captura la informacion de todos los productos featured
elementfeat = driver.find_elements_by_css_selector('.product.first.prominent_block.no-borde') #recupero la marca, detalle y si tiene stock.
#Para productos featured
for ele1 in elementfeat:
retorno = get_Seven_Data(ele1)
#Es 0 solo en el caso que haya grilla pero no producto
if retorno[0] != 0:
#Proceso los parametros que no tengo
codeCategory = doubleParam.split("|")[0]
nameCategory = doubleParam.split("|")[1]
#Hago el llamado a la funcion que inserta en la base de datos
inputSQL =retorno[0] +"|"+ nameCategory +"|"+ str(codeCategory) +"|"+ retorno[1] +"|"+ retorno[2] +"|"+ fecha_ejec +"|"+ str(retorno[3]) +"|"+ str(retorno[4]) +"|"+ str(retorno[5]) +"|"+ str(retorno[6])
word_to_SQL_insertion(inputSQL, tableName)
else:
pass
#Para productos no featured
for ele1 in elementemp:
retorno = get_Seven_Data(ele1)
#Es 0 solo en el caso que haya grilla pero no producto
if retorno[0] != 0:
#Proceso los parametros que no tengo
codeCategory = doubleParam.split("|")[0]
nameCategory = doubleParam.split("|")[1]
#Hago el llamado a la funcion que inserta en la base de datos
inputSQL =retorno[0] +"|"+ nameCategory +"|"+ str(codeCategory) +"|"+ retorno[1] +"|"+ retorno[2] +"|"+ fecha_ejec +"|"+ str(retorno[3]) +"|"+ str(retorno[4]) +"|"+ str(retorno[5]) +"|"+ str(retorno[6])
word_to_SQL_insertion(inputSQL, tableName)
else:
pass
#driver.close()
return
#MAIN
if __name__ == '__main__':
#fecha del dia
d = datetime.datetime.now().strftime("%Y-%m-%d")
d_name = d.replace("-","")
#inicializo tabla en MySQL
LiderSupermarket = d_name + '_Lider_Supermarket'
create_Table_Productos(LiderSupermarket)
#ejecuto el proceso
lets_Scrape_Supermarket('http://www.lider.cl/dys/',d,LiderSupermarket)
| UTF-8 | Python | false | false | 11,249 | py | 8 | Lider_Webscraper_Alimentos.py | 7 | 0.698551 | 0.682994 | 0 | 235 | 46.868085 | 386 |
kishmakov/rs | 1,425,929,181,365 | 25fdd4766fe77277721e608e413b18eaa25ac1e1 | 1d62731ed2ebc3c84ed27d40c7d0417f6ce0ab0e | /rs/urls.py | b484a9e53dfeca1d32c3c23fc53ab0c39b628994 | []
| no_license | https://github.com/kishmakov/rs | 064fd19aeac4dafda8788a35aa3bb5c5e78c687c | c5e932c36fd2a62625aaf5b374f754ee8b839602 | refs/heads/master | 2021-01-10T08:25:21.901462 | 2013-03-23T18:04:55 | 2013-03-23T18:04:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import patterns
from app.views import process_beg, process_go
import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
('^$', process_beg),
('^go/$', process_go),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
)
| UTF-8 | Python | false | false | 392 | py | 21 | urls.py | 4 | 0.693878 | 0.693878 | 0 | 13 | 29.153846 | 100 |
eco3/stochastik-ss21 | 7,662,221,700,551 | 3c34e698f52145225364495abc042a20ce008a31 | 7b43998db0d31e0e2036f4c06681d263a7875f5b | /Klausurtools/tools/multivariate_statistik.py | efbeeb0cd43a2cdd07aaa60c0c5c5bcab596747e | []
| no_license | https://github.com/eco3/stochastik-ss21 | 3dbabf87495a6274730b799c630a3b98624766cc | 3974a51c3b7c5dc9ebc79df01fc14cf635da9b11 | refs/heads/master | 2023-06-16T23:46:31.069291 | 2021-07-14T16:25:50 | 2021-07-14T16:25:50 | 353,711,310 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def lineare_korrelation(a, b):
korrelationskoeffizient = np.corrcoef(a, b)[0][1]
kovarianz = np.cov(a, b)[0][1]
print(f"(empirischer) Korrelationskoeffizient\t", "np.corrcoef(...)\t", f"r_x,y = {korrelationskoeffizient}")
print(f"(empirische) Kovarianz\t\t\t", "np.cov(...)\t\t", f"s_x,y = {kovarianz}")
def lineare_regression(a, b):
reg = stats.linregress(a, b)
print("Lineare Regression")
print(f"f(x) = {reg.slope:.3f} * x + {reg.intercept:.3f}")
print(f"Bestimmheitsmaß: R² = {reg.rvalue**2}")
def plot_regression(a, b):
reg = stats.linregress(a, b)
plt.plot(a, b, 'o', label='original data')
plt.plot(a, reg.intercept + reg.slope * np.asarray(a), 'r', label='fitted line')
plt.legend()
plt.show()
| UTF-8 | Python | false | false | 840 | py | 13 | multivariate_statistik.py | 11 | 0.636038 | 0.626492 | 0 | 28 | 28.928571 | 113 |
AtomikKeo/ProgrammingContestPractice | 3,332,894,629,908 | 8d539bbd68cf165857879ea93afc6144826e74ab | e0c5d703ab688e6a1f8384279d9ce46cf0f667c7 | /AtCoder/ABC/66/ss.py | 733f483d321037ca63be9df18d69be72e76ad7da | []
| no_license | https://github.com/AtomikKeo/ProgrammingContestPractice | a9b739156981f8c1b7213549ff2f258c4c6af8d4 | 52140087e825fad3be3a19c73814e70c434e0f0c | refs/heads/master | 2018-04-05T04:57:07.735681 | 2018-02-26T14:27:21 | 2018-02-26T14:27:21 | 89,137,597 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | s = input()
for i in range(1, len(s)):
if((len(s)-i) % 2 == 0):
ans = len(s) - i
if(s[0:ans//2] == s[ans//2:ans]):
print(ans)
break
| UTF-8 | Python | false | false | 176 | py | 66 | ss.py | 66 | 0.386364 | 0.352273 | 0 | 7 | 24.142857 | 41 |
bbmhmmad/django | 6,820,408,105,748 | 652069c451fc365fd1b0c38b582906b43b44c447 | f3100ddd6c7f20538949785fce97960ba07f4128 | /poke_app/apps/poke_app/migrations/0001_initial.py | be7be1cc62107330aaf1baa49dd56cf8dbba01db | []
| no_license | https://github.com/bbmhmmad/django | bfb14ea307515ae8675d404195dae9374e1a8550 | 5b6cc59bb2c40d7f4c7fc57e8b02e0c8c477ff9c | refs/heads/master | 2021-01-17T11:20:07.160572 | 2017-03-07T08:58:51 | 2017-03-07T08:58:51 | 84,030,826 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-18 16:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('login_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Poke',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
('poked', models.ManyToManyField(related_name='user_poked', to='login_app.User')),
('poker', models.ManyToManyField(related_name='user_poker', to='login_app.User')),
],
),
]
| UTF-8 | Python | false | false | 860 | py | 23 | 0001_initial.py | 12 | 0.576744 | 0.552326 | 0 | 27 | 30.851852 | 114 |
ash9750/Re0-ML | 11,312,943,867,671 | db98133ed3d5d503bc92008b237a826b6fcb41f7 | bf1f5acf58438c0eeed4e822ff5dfcafa4d5f934 | /balanced_kdtree.py | 80f1aa23d128ea883e05caebb1acf2cd8300c3aa | []
| no_license | https://github.com/ash9750/Re0-ML | 1d5ac183a724461ddd81af5bbc4db5ae482c33c4 | 094cc72b16bf55892213ec355de4199033d4ff06 | refs/heads/master | 2021-01-21T11:15:12.079280 | 2019-04-06T13:26:19 | 2019-04-06T13:26:19 | 83,539,185 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Node:
def __init__(self, point, dimension, depth, left=[], right=[]):
self.left = left
self.right = right
self.point = point
self.dimension = dimension
self.depth = depth
def median(data):
m = int(len(data) / 2)
return data[m], m
def build_tree(data, dimension, node_list, k, depth=0):
if depth < k:
data = sorted(data, key=lambda x: x[dimension])
data_median, index_medina = median(data)
del data[index_medina]
node_list.append(Node(data_median, dimension, depth, left=data[:index_medina],
right=data[index_medina:]))
depth += 1
if index_medina > 0: build_tree(data[:index_medina], not dimension, node_list, k, depth=depth)
if len(data) > 1: build_tree(data[index_medina:], not dimension, node_list, k, depth=depth)
return node_list
def train(data):
node_list = []
dimension = False
node_list = build_tree(data, dimension, node_list, k=3)
for i in range(len(node_list)):
print("node:", i, "\tpoint:", node_list[i].point, "\tdepth:", node_list[i].depth,
"\tdimension:", node_list[i].dimension, "\tleft:", node_list[i].left,
"right:", node_list[i].right)
T = [[2, 3], [5, 4], [9, 6], [4, 7], [8, 1], [7, 2]]
train(T)
| UTF-8 | Python | false | false | 1,369 | py | 4 | balanced_kdtree.py | 3 | 0.552958 | 0.53981 | 0 | 40 | 32.225 | 102 |
aCoffeeYin/pyreco | 16,140,487,119,963 | 31977ba1fa30d78438c128d5ce598b796198cfc1 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/openembedded-oe-core/allPythonContent.py | a0f23ebc945949a2b8fe7601b1af6dc68986c47f | []
| no_license | https://github.com/aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __FILENAME__ = buildhistory_analysis
# Report significant differences in the buildhistory repository since a specific revision
#
# Copyright (C) 2012 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
#
import sys
import os.path
import difflib
import git
import re
import bb.utils
# How to display fields
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
list_order_fields = ['PACKAGES']
defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
related_fields = {}
related_fields['RDEPENDS'] = ['DEPENDS']
related_fields['RRECOMMENDS'] = ['DEPENDS']
related_fields['FILELIST'] = ['FILES']
related_fields['PKGSIZE'] = ['FILELIST']
related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
class ChangeRecord:
def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
self.path = path
self.fieldname = fieldname
self.oldvalue = oldvalue
self.newvalue = newvalue
self.monitored = monitored
self.related = []
self.filechanges = None
def __str__(self):
return self._str_internal(True)
def _str_internal(self, outer):
if outer:
if '/image-files/' in self.path:
prefix = '%s: ' % self.path.split('/image-files/')[0]
else:
prefix = '%s: ' % self.path
else:
prefix = ''
def pkglist_combine(depver):
pkglist = []
for k,v in depver.iteritems():
if v:
pkglist.append("%s (%s)" % (k,v))
else:
pkglist.append(k)
return pkglist
if self.fieldname in list_fields or self.fieldname in list_order_fields:
if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
aitems = pkglist_combine(depvera)
bitems = pkglist_combine(depverb)
else:
aitems = self.oldvalue.split()
bitems = self.newvalue.split()
removed = list(set(aitems) - set(bitems))
added = list(set(bitems) - set(aitems))
if removed or added:
if removed and not bitems:
out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
else:
out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
else:
out = '%s changed order' % self.fieldname
elif self.fieldname in numeric_fields:
aval = int(self.oldvalue or 0)
bval = int(self.newvalue or 0)
if aval != 0:
percentchg = ((bval - aval) / float(aval)) * 100
else:
percentchg = 100
out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
elif self.fieldname in defaultval_map:
out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
if self.fieldname == 'PKG' and '[default]' in self.newvalue:
out += ' - may indicate debian renaming failure'
elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
if self.oldvalue and self.newvalue:
out = '%s changed:\n ' % self.fieldname
elif self.newvalue:
out = '%s added:\n ' % self.fieldname
elif self.oldvalue:
out = '%s cleared:\n ' % self.fieldname
alines = self.oldvalue.splitlines()
blines = self.newvalue.splitlines()
diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
out += '\n '.join(list(diff)[2:])
out += '\n --'
elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
fieldname = self.fieldname
if '/image-files/' in self.path:
fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
out = 'Changes to %s:\n ' % fieldname
else:
if outer:
prefix = 'Changes to %s ' % self.path
out = '(%s):\n ' % self.fieldname
if self.filechanges:
out += '\n '.join(['%s' % i for i in self.filechanges])
else:
alines = self.oldvalue.splitlines()
blines = self.newvalue.splitlines()
diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
out += '\n '.join(list(diff))
out += '\n --'
else:
out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
if self.related:
for chg in self.related:
if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
continue
for line in chg._str_internal(False).splitlines():
out += '\n * %s' % line
return '%s%s' % (prefix, out)
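# Illustrative sketch (not part of the original module): how a monitored
# list-field change renders through ChangeRecord.__str__(). The path and
# values below are fabricated for demonstration.
#
#   chg = ChangeRecord('packages/core2-64/foo/foo', 'RDEPENDS',
#                      'libc6', 'libc6 libbar', True)
#   str(chg)  # -> 'packages/core2-64/foo/foo: RDEPENDS: added "libbar"'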
class FileChange:
    """A single difference between two file listings (addition, removal or attribute change)."""
changetype_add = 'A'
changetype_remove = 'R'
changetype_type = 'T'
changetype_perms = 'P'
changetype_ownergroup = 'O'
changetype_link = 'L'
def __init__(self, path, changetype, oldvalue = None, newvalue = None):
self.path = path
self.changetype = changetype
self.oldvalue = oldvalue
self.newvalue = newvalue
def _ftype_str(self, ftype):
if ftype == '-':
return 'file'
elif ftype == 'd':
return 'directory'
elif ftype == 'l':
return 'symlink'
elif ftype == 'c':
return 'char device'
elif ftype == 'b':
return 'block device'
elif ftype == 'p':
return 'fifo'
elif ftype == 's':
return 'socket'
else:
return 'unknown (%s)' % ftype
def __str__(self):
if self.changetype == self.changetype_add:
return '%s was added' % self.path
elif self.changetype == self.changetype_remove:
return '%s was removed' % self.path
elif self.changetype == self.changetype_type:
return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
elif self.changetype == self.changetype_perms:
return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
elif self.changetype == self.changetype_ownergroup:
return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
elif self.changetype == self.changetype_link:
return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
else:
return '%s changed (unknown)' % self.path
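# Illustrative sketch (not part of the original module): the string forms
# FileChange produces, using fabricated paths:
#
#   str(FileChange('/etc/foo.conf', FileChange.changetype_add))
#   # -> '/etc/foo.conf was added'
#   str(FileChange('/sbin/init', FileChange.changetype_link,
#                  '/bin/busybox', '/lib/systemd/systemd'))
#   # -> '/sbin/init changed symlink target from /bin/busybox to /lib/systemd/systemd'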
def blob_to_dict(blob):
    """Parse a git blob of 'NAME = value' lines into a dict."""
alines = blob.data_stream.read().splitlines()
adict = {}
for line in alines:
splitv = [i.strip() for i in line.split('=',1)]
if len(splitv) > 1:
adict[splitv[0]] = splitv[1]
return adict
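# Illustrative sketch (not part of the original module): blob_to_dict() only
# relies on the object exposing data_stream.read(), the subset of GitPython's
# Blob API it touches, so a made-up stand-in object is enough to demonstrate it.
def _example_blob_to_dict():
    import StringIO
    class _StubBlob(object):
        def __init__(self, data):
            self.data_stream = StringIO.StringIO(data)
    blob = _StubBlob("PV = 1.0\nPR = r0\nNOISE\n")
    # Lines without '=' are skipped; whitespace around keys/values is stripped
    assert blob_to_dict(blob) == {'PV': '1.0', 'PR': 'r0'}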
def file_list_to_dict(lines):
    """Parse 'ls -l' style lines (as in files-in-image.txt) into a dict keyed on path."""
adict = {}
for line in lines:
# Leave the last few fields intact so we handle file names containing spaces
splitv = line.split(None,4)
# Grab the path and remove the leading .
path = splitv[4][1:].strip()
# Handle symlinks
if(' -> ' in path):
target = path.split(' -> ')[1]
path = path.split(' -> ')[0]
adict[path] = splitv[0:3] + [target]
else:
adict[path] = splitv[0:3]
return adict
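# Illustrative sketch (not part of the original module): the input is the
# 'ls -l'-style listing buildhistory writes to files-in-image.txt, with paths
# relative to '.'. The sample lines below are fabricated.
def _example_file_list_to_dict():
    d = file_list_to_dict(['-rw-r--r-- root root 1234 ./etc/hostname',
                           'lrwxrwxrwx root root 11 ./sbin/init -> ../bin/busybox'])
    # Regular entry: [type+permissions, owner, group]
    assert d['/etc/hostname'] == ['-rw-r--r--', 'root', 'root']
    # Symlink entry: [type+permissions, owner, group, target]
    assert d['/sbin/init'] == ['lrwxrwxrwx', 'root', 'root', '../bin/busybox']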
def compare_file_lists(alines, blines):
    """Diff two file listings, returning a list of FileChange objects."""
adict = file_list_to_dict(alines)
bdict = file_list_to_dict(blines)
filechanges = []
for path, splitv in adict.iteritems():
newsplitv = bdict.pop(path, None)
if newsplitv:
# Check type
oldvalue = splitv[0][0]
newvalue = newsplitv[0][0]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
# Check permissions
oldvalue = splitv[0][1:]
newvalue = newsplitv[0][1:]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
# Check owner/group
oldvalue = '%s/%s' % (splitv[1], splitv[2])
newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
# Check symlink target
if newsplitv[0][0] == 'l':
if len(splitv) > 3:
oldvalue = splitv[3]
else:
oldvalue = None
newvalue = newsplitv[3]
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
else:
filechanges.append(FileChange(path, FileChange.changetype_remove))
# Whatever is left over has been added
for path in bdict:
filechanges.append(FileChange(path, FileChange.changetype_add))
return filechanges
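# Illustrative sketch (not part of the original module): each detected
# difference between two fabricated listings becomes one FileChange.
def _example_compare_file_lists():
    old = ['-rw-r--r-- root root 10 ./etc/a']
    new = ['-rw------- root root 10 ./etc/a',
           '-rw-r--r-- root root 10 ./etc/b']
    changes = compare_file_lists(old, new)
    assert sorted(str(c) for c in changes) == [
        '/etc/a changed permissions from rw-r--r-- to rw-------',
        '/etc/b was added']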
def compare_lists(alines, blines):
    """Diff two flat lists, returning FileChange objects for removals and additions."""
removed = list(set(alines) - set(blines))
added = list(set(blines) - set(alines))
filechanges = []
for pkg in removed:
filechanges.append(FileChange(pkg, FileChange.changetype_remove))
for pkg in added:
filechanges.append(FileChange(pkg, FileChange.changetype_add))
return filechanges
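# Illustrative sketch (not part of the original module): used for flat
# listings such as installed-package-names.txt, where only additions and
# removals matter.
#
#   compare_lists(['a', 'b'], ['b', 'c'])
#   # -> one removal FileChange for 'a' and one addition for 'c',
#   #    in unspecified order (computed via set difference)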
def compare_pkg_lists(astr, bstr):
    """Explode two dependency strings and drop entries that only differ by a version increase."""
depvera = bb.utils.explode_dep_versions2(astr)
depverb = bb.utils.explode_dep_versions2(bstr)
# Strip out changes where the version has increased
remove = []
for k in depvera:
if k in depverb:
dva = depvera[k]
dvb = depverb[k]
if dva and dvb and len(dva) == len(dvb):
# Since length is the same, sort so that prefixes (e.g. >=) will line up
dva.sort()
dvb.sort()
removeit = True
for dvai, dvbi in zip(dva, dvb):
if dvai != dvbi:
aiprefix = dvai.split(' ')[0]
biprefix = dvbi.split(' ')[0]
if aiprefix == biprefix and aiprefix in ['>=', '=']:
if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
removeit = False
break
else:
removeit = False
break
if removeit:
remove.append(k)
for k in remove:
depvera.pop(k)
depverb.pop(k)
return (depvera, depverb)
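# Illustrative sketch (not part of the original module): a dependency whose
# '>=' constraint merely moved forward is filtered out as uninteresting,
# leaving only genuine differences. Needs bb.utils, as the function does.
def _example_compare_pkg_lists():
    depvera, depverb = compare_pkg_lists('foo (>= 1.0) bar',
                                         'foo (>= 1.1) bar baz')
    # 'foo' is dropped from both sides (pure version increase)
    assert 'foo' not in depvera and 'foo' not in depverb
    # 'baz' remains as a genuine addition
    assert 'baz' in depverb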
def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
    """Compare two 'NAME = value' blobs for one package/image path, returning ChangeRecords."""
adict = blob_to_dict(ablob)
bdict = blob_to_dict(bblob)
pkgname = os.path.basename(path)
defaultvals = {}
defaultvals['PKG'] = pkgname
defaultvals['PKGE'] = '0'
changes = []
keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
for key in keys:
astr = adict.get(key, '')
bstr = bdict.get(key, '')
if key in ver_monitor_fields:
monitored = report_ver or astr or bstr
else:
monitored = key in monitor_fields
mapped_key = defaultval_map.get(key, '')
if mapped_key:
if not astr:
astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
if not bstr:
bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))
if astr != bstr:
if (not report_all) and key in numeric_fields:
aval = int(astr or 0)
bval = int(bstr or 0)
if aval != 0:
percentchg = ((bval - aval) / float(aval)) * 100
else:
percentchg = 100
if abs(percentchg) < monitor_numeric_threshold:
continue
elif (not report_all) and key in list_fields:
if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
continue
if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(astr, bstr)
if depvera == depverb:
continue
alist = astr.split()
alist.sort()
blist = bstr.split()
blist.sort()
# We don't care about the removal of self-dependencies
if pkgname in alist and not pkgname in blist:
alist.remove(pkgname)
if ' '.join(alist) == ' '.join(blist):
continue
chg = ChangeRecord(path, key, astr, bstr, monitored)
changes.append(chg)
return changes
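# Illustrative sketch (not part of the original module): numeric fields such
# as PKGSIZE only produce a ChangeRecord when they move by at least
# monitor_numeric_threshold percent, unless report_all is set.
def _example_compare_dict_blobs():
    import StringIO
    class _StubBlob(object):
        def __init__(self, data):
            self.data_stream = StringIO.StringIO(data)
    a = _StubBlob("PKGSIZE = 1000\n")
    b = _StubBlob("PKGSIZE = 1050\n")
    # +5% is below the 10% threshold, so nothing is reported
    assert compare_dict_blobs('packages/arch/foo/foo', a, b, False, False) == []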
def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
    """Walk the git diff between two buildhistory revisions and return the resulting ChangeRecords."""
repo = git.Repo(repopath)
assert repo.bare == False
commit = repo.commit(revision1)
diff = commit.diff(revision2)
changes = []
for d in diff.iter_change_type('M'):
path = os.path.dirname(d.a_blob.path)
if path.startswith('packages/'):
filename = os.path.basename(d.a_blob.path)
if filename == 'latest':
changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
elif filename.startswith('latest.'):
chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
changes.append(chg)
elif path.startswith('images/'):
filename = os.path.basename(d.a_blob.path)
if filename in img_monitor_files:
if filename == 'files-in-image.txt':
alines = d.a_blob.data_stream.read().splitlines()
blines = d.b_blob.data_stream.read().splitlines()
filechanges = compare_file_lists(alines,blines)
if filechanges:
chg = ChangeRecord(path, filename, None, None, True)
chg.filechanges = filechanges
changes.append(chg)
elif filename == 'installed-package-names.txt':
alines = d.a_blob.data_stream.read().splitlines()
blines = d.b_blob.data_stream.read().splitlines()
filechanges = compare_lists(alines,blines)
if filechanges:
chg = ChangeRecord(path, filename, None, None, True)
chg.filechanges = filechanges
changes.append(chg)
else:
chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
changes.append(chg)
elif filename == 'image-info.txt':
changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
elif '/image-files/' in path:
chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
changes.append(chg)
# Look for added preinst/postinst/prerm/postrm
# (without reporting newly added recipes)
addedpkgs = []
addedchanges = []
for d in diff.iter_change_type('A'):
path = os.path.dirname(d.b_blob.path)
if path.startswith('packages/'):
filename = os.path.basename(d.b_blob.path)
if filename == 'latest':
addedpkgs.append(path)
elif filename.startswith('latest.'):
chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
addedchanges.append(chg)
for chg in addedchanges:
found = False
for pkg in addedpkgs:
if chg.path.startswith(pkg):
found = True
break
if not found:
changes.append(chg)
# Look for cleared preinst/postinst/prerm/postrm
for d in diff.iter_change_type('D'):
path = os.path.dirname(d.a_blob.path)
if path.startswith('packages/'):
filename = os.path.basename(d.a_blob.path)
if filename != 'latest' and filename.startswith('latest.'):
chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
changes.append(chg)
# Link related changes
for chg in changes:
if chg.monitored:
for chg2 in changes:
# (Check dirname in the case of fields from recipe info files)
if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
if chg2.fieldname in related_fields.get(chg.fieldname, []):
chg.related.append(chg2)
elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
chg.related.append(chg2)
if report_all:
return changes
else:
return [chg for chg in changes if chg.monitored]
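# Hedged usage sketch (added for illustration, not part of the original
# module): walking the monitored changes between two buildhistory
# revisions. 'HEAD~1' is an arbitrary example revision; any refs accepted
# by GitPython will do. Never called from within this module.
def _example_process_changes(repopath):
    for chg in process_changes(repopath, 'HEAD~1', 'HEAD'):
        print("%s: %s" % (chg.path, chg.fieldname))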
########NEW FILE########
__FILENAME__ = cachedpath
#
# Based on standard python library functions but avoiding
# repeated stat calls. It's assumed the files will not change from under us,
# so we can cache stat calls.
#
import os
import errno
import stat as statmod
class CachedPath(object):
def __init__(self):
self.statcache = {}
self.lstatcache = {}
self.normpathcache = {}
return
def updatecache(self, x):
x = self.normpath(x)
if x in self.statcache:
del self.statcache[x]
if x in self.lstatcache:
del self.lstatcache[x]
def normpath(self, path):
if path in self.normpathcache:
return self.normpathcache[path]
newpath = os.path.normpath(path)
self.normpathcache[path] = newpath
return newpath
def _callstat(self, path):
if path in self.statcache:
return self.statcache[path]
try:
st = os.stat(path)
self.statcache[path] = st
return st
except os.error:
self.statcache[path] = False
return False
    # We might as well call lstat and then only
    # call stat as well in the symbolic link case,
    # since this turns out to be much more efficient
    # in real-world usage of this cache
def callstat(self, path):
path = self.normpath(path)
self.calllstat(path)
return self.statcache[path]
def calllstat(self, path):
path = self.normpath(path)
if path in self.lstatcache:
return self.lstatcache[path]
#bb.error("LStatpath:" + path)
try:
lst = os.lstat(path)
self.lstatcache[path] = lst
if not statmod.S_ISLNK(lst.st_mode):
self.statcache[path] = lst
else:
self._callstat(path)
return lst
except (os.error, AttributeError):
self.lstatcache[path] = False
self.statcache[path] = False
return False
    # This follows symbolic links, so both islink() and isdir() can be true
    # for the same path on systems that support symlinks
def isfile(self, path):
"""Test whether a path is a regular file"""
st = self.callstat(path)
if not st:
return False
return statmod.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(self, s):
"""Return true if the pathname refers to an existing directory."""
st = self.callstat(s)
if not st:
return False
return statmod.S_ISDIR(st.st_mode)
def islink(self, path):
"""Test whether a path is a symbolic link"""
st = self.calllstat(path)
if not st:
return False
return statmod.S_ISLNK(st.st_mode)
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(self, path):
"""Test whether a path exists. Returns False for broken symbolic links"""
if self.callstat(path):
return True
return False
def lexists(self, path):
"""Test whether a path exists. Returns True for broken symbolic links"""
if self.calllstat(path):
return True
return False
def stat(self, path):
return self.callstat(path)
def lstat(self, path):
return self.calllstat(path)
def walk(self, top, topdown=True, onerror=None, followlinks=False):
# Matches os.walk, not os.path.walk()
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
names = os.listdir(top)
except os.error as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if self.isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = os.path.join(top, name)
if followlinks or not self.islink(new_path):
for x in self.walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
## realpath() related functions
def __is_path_below(self, file, root):
return (file + os.path.sep).startswith(root)
def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
"""Calculates real path of symlink 'start' + 'rel_path' below
'root'; no part of 'start' below 'root' must contain symlinks. """
have_dir = True
for d in rel_path.split(os.path.sep):
if not have_dir and not assume_dir:
raise OSError(errno.ENOENT, "no such directory %s" % start)
if d == os.path.pardir: # '..'
if len(start) >= len(root):
# do not follow '..' before root
start = os.path.dirname(start)
else:
# emit warning?
pass
else:
(start, have_dir) = self.__realpath(os.path.join(start, d),
root, loop_cnt, assume_dir)
assert(self.__is_path_below(start, root))
return start
def __realpath(self, file, root, loop_cnt, assume_dir):
while self.islink(file) and len(file) >= len(root):
if loop_cnt == 0:
raise OSError(errno.ELOOP, file)
loop_cnt -= 1
target = os.path.normpath(os.readlink(file))
if not os.path.isabs(target):
tdir = os.path.dirname(file)
assert(self.__is_path_below(tdir, root))
else:
tdir = root
file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)
try:
is_dir = self.isdir(file)
except:
is_dir = False
return (file, is_dir)
def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
""" Returns the canonical path of 'file' with assuming a
toplevel 'root' directory. When 'use_physdir' is set, all
preceding path components of 'file' will be resolved first;
this flag should be set unless it is guaranteed that there is
no symlink in the path. When 'assume_dir' is not set, missing
path components will raise an ENOENT error"""
root = os.path.normpath(root)
file = os.path.normpath(file)
if not root.endswith(os.path.sep):
# letting root end with '/' makes some things easier
root = root + os.path.sep
if not self.__is_path_below(file, root):
raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
try:
if use_physdir:
file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
else:
file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
except OSError as e:
if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it here, a
                # backtrace with 100s of OSError exceptions would be
                # printed instead
                raise OSError(errno.ELOOP,
                              "too many levels of recursion while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))
raise
return file
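# Hedged usage sketch (added for illustration, not part of the original
# module): repeated queries against the same path are served from the
# stat/lstat caches instead of hitting the filesystem again. '/tmp' is
# just an example path. Never called from within this module.
def _example_cachedpath():
    cp = CachedPath()
    if cp.exists('/tmp') and cp.isdir('/tmp') and not cp.islink('/tmp'):
        # the later calls are served from the cache populated by the first
        return cp.stat('/tmp')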
########NEW FILE########
__FILENAME__ = classextend
class ClassExtender(object):
def __init__(self, extname, d):
self.extname = extname
self.d = d
self.pkgs_mapping = []
def extend_name(self, name):
if name.startswith("kernel-") or name == "virtual/kernel":
return name
if name.startswith("rtld"):
return name
if name.endswith("-" + self.extname):
name = name.replace("-" + self.extname, "")
if name.startswith("virtual/"):
subs = name.split("/", 1)[1]
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
return name
if not name.startswith(self.extname):
return self.extname + "-" + name
return name
def map_variable(self, varname, setvar = True):
var = self.d.getVar(varname, True)
if not var:
return ""
var = var.split()
newvar = []
for v in var:
newvar.append(self.extend_name(v))
newdata = " ".join(newvar)
if setvar:
self.d.setVar(varname, newdata)
return newdata
def map_regexp_variable(self, varname, setvar = True):
var = self.d.getVar(varname, True)
if not var:
return ""
var = var.split()
newvar = []
for v in var:
if v.startswith("^" + self.extname):
newvar.append(v)
elif v.startswith("^"):
newvar.append("^" + self.extname + "-" + v[1:])
else:
newvar.append(self.extend_name(v))
newdata = " ".join(newvar)
if setvar:
self.d.setVar(varname, newdata)
return newdata
def map_depends(self, dep):
if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
return dep
else:
return self.extend_name(dep)
def map_depends_variable(self, varname, suffix = ""):
if suffix:
varname = varname + "_" + suffix
deps = self.d.getVar(varname, True)
if not deps:
return
deps = bb.utils.explode_dep_versions2(deps)
newdeps = {}
for dep in deps:
newdeps[self.map_depends(dep)] = deps[dep]
self.d.setVar(varname, bb.utils.join_deps(newdeps, False))
def map_packagevars(self):
for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
self.map_depends_variable("RDEPENDS", pkg)
self.map_depends_variable("RRECOMMENDS", pkg)
self.map_depends_variable("RSUGGESTS", pkg)
self.map_depends_variable("RPROVIDES", pkg)
self.map_depends_variable("RREPLACES", pkg)
self.map_depends_variable("RCONFLICTS", pkg)
self.map_depends_variable("PKG", pkg)
def rename_packages(self):
for pkg in (self.d.getVar("PACKAGES", True) or "").split():
if pkg.startswith(self.extname):
self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
continue
self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))
def rename_package_variables(self, variables):
for pkg_mapping in self.pkgs_mapping:
for subs in variables:
self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
return dep + "-crosssdk"
elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
return dep
else:
return self.extend_name(dep)
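# Hedged usage sketch (added for illustration, not part of the original
# module): with a bitbake datastore 'd', extend_name() prefixes a package
# name with the extension unless it is already extended or exempt.
# "nativesdk" and "zlib" are example values. Never called from here.
def _example_extend_name(d):
    ext = ClassExtender("nativesdk", d)
    return ext.extend_name("zlib")   # -> "nativesdk-zlib"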
########NEW FILE########
__FILENAME__ = classutils
class ClassRegistry(type):
"""Maintain a registry of classes, indexed by name.
Note that this implementation requires that the names be unique, as it uses
a dictionary to hold the classes by name.
The name in the registry can be overridden via the 'name' attribute of the
class, and the 'priority' attribute controls priority. The prioritized()
method returns the registered classes in priority order.
Subclasses of ClassRegistry may define an 'implemented' property to exert
control over whether the class will be added to the registry (e.g. to keep
abstract base classes out of the registry)."""
priority = 0
class __metaclass__(type):
"""Give each ClassRegistry their own registry"""
def __init__(cls, name, bases, attrs):
cls.registry = {}
type.__init__(cls, name, bases, attrs)
def __init__(cls, name, bases, attrs):
super(ClassRegistry, cls).__init__(name, bases, attrs)
try:
if not cls.implemented:
return
except AttributeError:
pass
try:
cls.name
except AttributeError:
cls.name = name
cls.registry[cls.name] = cls
@classmethod
def prioritized(tcls):
return sorted(tcls.registry.values(),
key=lambda v: v.priority, reverse=True)
def unregister(cls):
for key in cls.registry.keys():
if cls.registry[key] is cls:
del cls.registry[key]
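# A minimal sketch (added for illustration, not part of the original
# module) of how ClassRegistry is meant to be used: classes created with
# it as their metaclass register themselves by name, 'implemented' keeps
# the abstract base out, and prioritized() orders the rest. Wrapped in a
# function so nothing is registered at import time; never called here.
def _example_registry():
    class _ExampleBase(object):
        __metaclass__ = ClassRegistry
        implemented = False          # abstract base, stays unregistered
    class _ExampleImpl(_ExampleBase):
        implemented = True
        name = "example"             # registry key; defaults to class name
        priority = 10                # higher sorts first in prioritized()
    return _ExampleBase.prioritized()   # -> [_ExampleImpl]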
########NEW FILE########
__FILENAME__ = data
import oe.maketype
def typed_value(key, d):
"""Construct a value for the specified metadata variable, using its flags
to determine the type and parameters for construction."""
var_type = d.getVarFlag(key, 'type')
flags = d.getVarFlags(key)
if flags is not None:
flags = dict((flag, d.expand(value))
for flag, value in flags.iteritems())
else:
flags = {}
try:
return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
except (TypeError, ValueError), exc:
bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
########NEW FILE########
__FILENAME__ = distro_check
def get_links_from_url(url):
"Return all the href links found on the web location"
import urllib, sgmllib
class LinksParser(sgmllib.SGMLParser):
def parse(self, s):
"Parse the given string 's'."
self.feed(s)
self.close()
def __init__(self, verbose=0):
"Initialise an object passing 'verbose' to the superclass."
sgmllib.SGMLParser.__init__(self, verbose)
self.hyperlinks = []
def start_a(self, attributes):
"Process a hyperlink and its 'attributes'."
for name, value in attributes:
if name == "href":
self.hyperlinks.append(value.strip('/'))
def get_hyperlinks(self):
"Return the list of hyperlinks."
return self.hyperlinks
sock = urllib.urlopen(url)
webpage = sock.read()
sock.close()
linksparser = LinksParser()
linksparser.parse(webpage)
return linksparser.get_hyperlinks()
def find_latest_numeric_release(url):
    "Find the latest listed numeric release on the given url"
    maxrel = 0
    maxstr = ""
    for link in get_links_from_url(url):
        try:
            release = float(link)
        except ValueError:
            release = 0
        if release > maxrel:
            maxrel = release
            maxstr = link
    return maxstr
def is_src_rpm(name):
"Check if the link is pointing to a src.rpm file"
if name[-8:] == ".src.rpm":
return True
else:
return False
def package_name_from_srpm(srpm):
"Strip out the package name from the src.rpm filename"
strings = srpm.split('-')
package_name = strings[0]
    for i in range(1, len(strings) - 1):
        component = strings[i]
        if not component[0].isdigit():
            package_name += '-' + component
return package_name
def clean_package_list(package_list):
    "Remove duplicate package entries from the list"
    unique = {}
    map(unique.__setitem__, package_list, [])
    return unique.keys()
def get_latest_released_meego_source_package_list():
"Returns list of all the name os packages in the latest meego distro"
package_names = []
try:
f = open("/tmp/Meego-1.1", "r")
for line in f:
package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
except IOError: pass
    package_list = clean_package_list(package_names)
    return "1.1", package_list
def get_source_package_list_from_url(url, section):
"Return a sectioned list of package names from a URL list"
bb.note("Reading %s: %s" % (url, section))
links = get_links_from_url(url)
srpms = filter(is_src_rpm, links)
names_list = map(package_name_from_srpm, srpms)
new_pkgs = []
for pkgs in names_list:
new_pkgs.append(pkgs + ":" + section)
return new_pkgs
def get_latest_released_fedora_source_package_list():
"Returns list of all the name os packages in the latest fedora distro"
latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")
package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main")
# package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def get_latest_released_opensuse_source_package_list():
"Returns list of all the name os packages in the latest opensuse distro"
latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")
package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def get_latest_released_mandriva_source_package_list():
"Returns list of all the name os packages in the latest mandriva distro"
latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")
package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main")
# package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def find_latest_debian_release(url):
"Find the latest listed debian release on the given url"
releases = []
for link in get_links_from_url(url):
if link[:6] == "Debian":
if ';' not in link:
releases.append(link)
releases.sort()
    try:
        return releases.pop()[6:]
    except IndexError:
        return "_NotFound_"
def get_debian_style_source_package_list(url, section):
"Return the list of package-names stored in the debian style Sources.gz file"
import urllib
sock = urllib.urlopen(url)
import tempfile
tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
tmpfilename=tmpfile.name
tmpfile.write(sock.read())
sock.close()
tmpfile.close()
import gzip
bb.note("Reading %s: %s" % (url, section))
f = gzip.open(tmpfilename)
package_names = []
for line in f:
if line[:9] == "Package: ":
package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
os.unlink(tmpfilename)
return package_names
def get_latest_released_debian_source_package_list():
"Returns list of all the name os packages in the latest debian distro"
latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
package_names = get_debian_style_source_package_list(url, "main")
# url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
# package_names += get_debian_style_source_package_list(url, "contrib")
url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
package_names += get_debian_style_source_package_list(url, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def find_latest_ubuntu_release(url):
"Find the latest listed ubuntu release on the given url"
url += "?C=M;O=D" # Descending Sort by Last Modified
for link in get_links_from_url(url):
if link[-8:] == "-updates":
return link[:-8]
return "_NotFound_"
def get_latest_released_ubuntu_source_package_list():
"Returns list of all the name os packages in the latest ubuntu distro"
latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
package_names = get_debian_style_source_package_list(url, "main")
# url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
# package_names += get_debian_style_source_package_list(url, "multiverse")
# url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
# package_names += get_debian_style_source_package_list(url, "universe")
url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
package_names += get_debian_style_source_package_list(url, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def create_distro_packages_list(distro_check_dir):
pkglst_dir = os.path.join(distro_check_dir, "package_lists")
if not os.path.isdir (pkglst_dir):
os.makedirs(pkglst_dir)
# first clear old stuff
for file in os.listdir(pkglst_dir):
os.unlink(os.path.join(pkglst_dir, file))
per_distro_functions = [
["Debian", get_latest_released_debian_source_package_list],
["Ubuntu", get_latest_released_ubuntu_source_package_list],
["Fedora", get_latest_released_fedora_source_package_list],
["OpenSuSE", get_latest_released_opensuse_source_package_list],
["Mandriva", get_latest_released_mandriva_source_package_list],
["Meego", get_latest_released_meego_source_package_list]
]
from datetime import datetime
begin = datetime.now()
for distro in per_distro_functions:
name = distro[0]
release, package_list = distro[1]()
bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
package_list_file = os.path.join(pkglst_dir, name + "-" + release)
f = open(package_list_file, "w+b")
for pkg in package_list:
f.write(pkg + "\n")
f.close()
end = datetime.now()
delta = end - begin
bb.note("package_list generatiosn took this much time: %d seconds" % delta.seconds)
def update_distro_data(distro_check_dir, datetime):
"""
If distro packages list data is old then rebuild it.
The operations has to be protected by a lock so that
only one thread performes it at a time.
"""
if not os.path.isdir (distro_check_dir):
try:
bb.note ("Making new directory: %s" % distro_check_dir)
os.makedirs (distro_check_dir)
except OSError:
raise Exception('Unable to create directory %s' % (distro_check_dir))
datetime_file = os.path.join(distro_check_dir, "build_datetime")
saved_datetime = "_invalid_"
    import fcntl
    f = None
    try:
if not os.path.exists(datetime_file):
open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail
f = open(datetime_file, "r+b")
fcntl.lockf(f, fcntl.LOCK_EX)
saved_datetime = f.read()
if saved_datetime[0:8] != datetime[0:8]:
bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
bb.note("Regenerating distro package lists")
create_distro_packages_list(distro_check_dir)
f.seek(0)
f.write(datetime)
except OSError:
raise Exception('Unable to read/write this file: %s' % (datetime_file))
    finally:
        if f:
            fcntl.lockf(f, fcntl.LOCK_UN)
            f.close()
def compare_in_distro_packages_list(distro_check_dir, d):
if not os.path.isdir(distro_check_dir):
raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
localdata = bb.data.createCopy(d)
pkglst_dir = os.path.join(distro_check_dir, "package_lists")
matching_distros = []
pn = d.getVar('PN', True)
recipe_name = d.getVar('PN', True)
bb.note("Checking: %s" % pn)
trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
recipe_name = pnstripped[0]
if pn.startswith("nativesdk-"):
pnstripped = pn.split("nativesdk-")
localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
recipe_name = pnstripped[1]
if pn.find("-cross") != -1:
pnstripped = pn.split("-cross")
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
recipe_name = pnstripped[0]
if pn.find("-initial") != -1:
pnstripped = pn.split("-initial")
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
recipe_name = pnstripped[0]
bb.note("Recipe: %s" % recipe_name)
tmp = localdata.getVar('DISTRO_PN_ALIAS', True)
distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
    if tmp:
        for item in tmp.split(' '):
            if item and item.find("=") == -1 and item in distro_exceptions:
                matching_distros.append(item)
distro_pn_aliases = {}
    if tmp:
        for item in tmp.split(' '):
            if item.find("=") != -1:
                (dist, pn_alias) = item.split('=')
                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
for file in os.listdir(pkglst_dir):
(distro, distro_release) = file.split("-")
f = open(os.path.join(pkglst_dir, file), "rb")
for line in f:
(pkg, section) = line.split(":")
if distro.lower() in distro_pn_aliases:
pn = distro_pn_aliases[distro.lower()]
else:
pn = recipe_name
            if pn == pkg:
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                break
        f.close()
    if tmp is not None:
        for item in tmp.split(' '):
            matching_distros.append(item)
bb.note("Matching: %s" % matching_distros)
return matching_distros
def create_log_file(d, logname):
import subprocess
logpath = d.getVar('LOG_DIR', True)
bb.utils.mkdirhier(logpath)
logfn, logsuffix = os.path.splitext(logname)
logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
if not os.path.exists(logfile):
slogfile = os.path.join(logpath, logname)
if os.path.exists(slogfile):
os.remove(slogfile)
subprocess.call("touch %s" % logfile, shell=True)
os.symlink(logfile, slogfile)
d.setVar('LOG_FILE', logfile)
return logfile
def save_distro_check_result(result, datetime, result_file, d):
pn = d.getVar('PN', True)
logdir = d.getVar('LOG_DIR', True)
if not logdir:
bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
return
if not os.path.isdir(logdir):
os.makedirs(logdir)
line = pn
for i in result:
line = line + "," + i
f = open(result_file, "a")
import fcntl
fcntl.lockf(f, fcntl.LOCK_EX)
f.seek(0, os.SEEK_END) # seek to the end of file
f.write(line + "\n")
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()
########NEW FILE########
__FILENAME__ = image
from oe.utils import execute_pre_post_process
import os
import subprocess
import multiprocessing
def generate_image(arg):
(type, subimages, create_img_cmd) = arg
bb.note("Running image creation script for %s: %s ..." %
(type, create_img_cmd))
try:
subprocess.check_output(create_img_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return("Error: The image creation script '%s' returned %d:\n%s" %
(e.cmd, e.returncode, e.output))
return None
"""
This class will help compute IMAGE_FSTYPE dependencies and group them in batches
that can be executed in parallel.
The next example is for illustration purposes, highly unlikely to happen in real life.
It's just one of the test cases I used to test the algorithm:
For:
IMAGE_FSTYPES = "i1 i2 i3 i4 i5"
IMAGE_TYPEDEP_i4 = "i2"
IMAGE_TYPEDEP_i5 = "i6 i4"
IMAGE_TYPEDEP_i6 = "i7"
IMAGE_TYPEDEP_i7 = "i2"
We get the following list of batches that can be executed in parallel, having the
dependencies satisfied:
[['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']]
"""
class ImageDepGraph(object):
def __init__(self, d):
self.d = d
self.graph = dict()
self.deps_array = dict()
def _construct_dep_graph(self, image_fstypes):
graph = dict()
def add_node(node):
deps = (self.d.getVar('IMAGE_TYPEDEP_' + node, True) or "")
if deps != "":
graph[node] = deps
for dep in deps.split():
if not dep in graph:
add_node(dep)
else:
graph[node] = ""
for fstype in image_fstypes:
add_node(fstype)
return graph
def _clean_graph(self):
        # Live and VMDK images will be processed via inheriting
        # bbclass and do not get processed here. Remove them from the fstypes
        # graph. Their dependencies are already added, so no worries here.
remove_list = (self.d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
for item in remove_list:
self.graph.pop(item, None)
def _compute_dependencies(self):
"""
returns dict object of nodes with [no_of_depends_on, no_of_depended_by]
for each node
"""
deps_array = dict()
for node in self.graph:
deps_array[node] = [0, 0]
for node in self.graph:
deps = self.graph[node].split()
deps_array[node][0] += len(deps)
for dep in deps:
deps_array[dep][1] += 1
return deps_array
def _sort_graph(self):
sorted_list = []
group = []
for node in self.graph:
if node not in self.deps_array:
continue
depends_on = self.deps_array[node][0]
if depends_on == 0:
group.append(node)
if len(group) == 0 and len(self.deps_array) != 0:
bb.fatal("possible fstype circular dependency...")
sorted_list.append(group)
# remove added nodes from deps_array
for item in group:
for node in self.graph:
if item in self.graph[node]:
self.deps_array[node][0] -= 1
self.deps_array.pop(item, None)
if len(self.deps_array):
# recursive call, to find the next group
sorted_list += self._sort_graph()
return sorted_list
def group_fstypes(self, image_fstypes):
self.graph = self._construct_dep_graph(image_fstypes)
self._clean_graph()
self.deps_array = self._compute_dependencies()
alltypes = [node for node in self.graph]
return (alltypes, self._sort_graph())
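# Hedged sketch (added for illustration, not part of the original module):
# reproduces the docstring example above with a minimal stand-in for the
# bitbake datastore. _FakeData is hypothetical; a real datastore is
# normally passed in. Never called from within this module.
def _example_group_fstypes():
    class _FakeData(object):
        deps = {'IMAGE_TYPEDEP_i4': 'i2', 'IMAGE_TYPEDEP_i5': 'i6 i4',
                'IMAGE_TYPEDEP_i6': 'i7', 'IMAGE_TYPEDEP_i7': 'i2'}
        def getVar(self, name, expand=True):
            return self.deps.get(name, "")
    graph = ImageDepGraph(_FakeData())
    # Expected batches (order within a batch may vary):
    # [['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']]
    return graph.group_fstypes("i1 i2 i3 i4 i5".split())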
class Image(ImageDepGraph):
def __init__(self, d):
self.d = d
super(Image, self).__init__(d)
def _get_rootfs_size(self):
"""compute the rootfs size"""
rootfs_alignment = int(self.d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
overhead_factor = float(self.d.getVar('IMAGE_OVERHEAD_FACTOR', True))
rootfs_req_size = int(self.d.getVar('IMAGE_ROOTFS_SIZE', True))
rootfs_extra_space = eval(self.d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
output = subprocess.check_output(['du', '-ks',
self.d.getVar('IMAGE_ROOTFS', True)])
size_kb = int(output.split()[0])
base_size = size_kb * overhead_factor
base_size = (base_size, rootfs_req_size)[base_size < rootfs_req_size] + \
rootfs_extra_space
if base_size != int(base_size):
base_size = int(base_size + 1)
base_size += rootfs_alignment - 1
base_size -= base_size % rootfs_alignment
return base_size
def _create_symlinks(self, subimages):
"""create symlinks to the newly created image"""
deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
img_name = self.d.getVar('IMAGE_NAME', True)
link_name = self.d.getVar('IMAGE_LINK_NAME', True)
manifest_name = self.d.getVar('IMAGE_MANIFEST', True)
os.chdir(deploy_dir)
if link_name is not None:
for type in subimages:
if os.path.exists(img_name + ".rootfs." + type):
dst = link_name + "." + type
src = img_name + ".rootfs." + type
bb.note("Creating symlink: %s -> %s" % (dst, src))
os.symlink(src, dst)
if manifest_name is not None and \
os.path.exists(manifest_name) and \
not os.path.exists(link_name + ".manifest"):
os.symlink(os.path.basename(manifest_name),
link_name + ".manifest")
def _remove_old_symlinks(self):
"""remove the symlinks to old binaries"""
if self.d.getVar('IMAGE_LINK_NAME', True):
deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
for img in os.listdir(deploy_dir):
if img.find(self.d.getVar('IMAGE_LINK_NAME', True)) == 0:
img = os.path.join(deploy_dir, img)
if os.path.islink(img):
if self.d.getVar('RM_OLD_IMAGE', True) == "1" and \
os.path.exists(os.path.realpath(img)):
os.remove(os.path.realpath(img))
os.remove(img)
"""
This function will just filter out the compressed image types from the
fstype groups returning a (filtered_fstype_groups, cimages) tuple.
"""
    def _filter_out_compressed(self, fstype_groups):
ctypes = self.d.getVar('COMPRESSIONTYPES', True).split()
cimages = {}
filtered_groups = []
for group in fstype_groups:
filtered_group = []
for type in group:
basetype = None
for ctype in ctypes:
if type.endswith("." + ctype):
basetype = type[:-len("." + ctype)]
if basetype not in filtered_group:
filtered_group.append(basetype)
if basetype not in cimages:
cimages[basetype] = []
if ctype not in cimages[basetype]:
cimages[basetype].append(ctype)
break
if not basetype and type not in filtered_group:
filtered_group.append(type)
filtered_groups.append(filtered_group)
return (filtered_groups, cimages)
def _get_image_types(self):
"""returns a (types, cimages) tuple"""
alltypes, fstype_groups = self.group_fstypes(self.d.getVar('IMAGE_FSTYPES', True).split())
        filtered_groups, cimages = self._filter_out_compressed(fstype_groups)
return (alltypes, filtered_groups, cimages)
def _write_script(self, type, cmds):
tempdir = self.d.getVar('T', True)
script_name = os.path.join(tempdir, "create_image." + type)
self.d.setVar('img_creation_func', '\n'.join(cmds))
self.d.setVarFlag('img_creation_func', 'func', 1)
self.d.setVarFlag('img_creation_func', 'fakeroot', 1)
with open(script_name, "w+") as script:
script.write("%s" % bb.build.shell_trap_code())
script.write("export ROOTFS_SIZE=%d\n" % self._get_rootfs_size())
bb.data.emit_func('img_creation_func', script, self.d)
script.write("img_creation_func\n")
os.chmod(script_name, 0775)
return script_name
def _get_imagecmds(self):
old_overrides = self.d.getVar('OVERRIDES', 0)
alltypes, fstype_groups, cimages = self._get_image_types()
image_cmd_groups = []
bb.note("The image creation groups are: %s" % str(fstype_groups))
for fstype_group in fstype_groups:
image_cmds = []
for type in fstype_group:
cmds = []
subimages = []
localdata = bb.data.createCopy(self.d)
localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
bb.data.update_data(localdata)
localdata.setVar('type', type)
cmds.append("\t" + localdata.getVar("IMAGE_CMD", True))
cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))
if type in cimages:
for ctype in cimages[type]:
cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
subimages.append(type + "." + ctype)
if type not in alltypes:
cmds.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}"))
else:
subimages.append(type)
script_name = self._write_script(type, cmds)
image_cmds.append((type, subimages, script_name))
image_cmd_groups.append(image_cmds)
return image_cmd_groups
def create(self):
bb.note("###### Generate images #######")
pre_process_cmds = self.d.getVar("IMAGE_PREPROCESS_COMMAND", True)
post_process_cmds = self.d.getVar("IMAGE_POSTPROCESS_COMMAND", True)
execute_pre_post_process(self.d, pre_process_cmds)
self._remove_old_symlinks()
image_cmd_groups = self._get_imagecmds()
for image_cmds in image_cmd_groups:
# create the images in parallel
nproc = multiprocessing.cpu_count()
pool = bb.utils.multiprocessingpool(nproc)
results = list(pool.imap(generate_image, image_cmds))
pool.close()
pool.join()
for result in results:
if result is not None:
bb.fatal(result)
for image_type, subimages, script in image_cmds:
bb.note("Creating symlinks for %s image ..." % image_type)
self._create_symlinks(subimages)
execute_pre_post_process(self.d, post_process_cmds)
def create_image(d):
Image(d).create()
if __name__ == "__main__":
"""
Image creation can be called independent from bitbake environment.
"""
"""
TBD
"""
########NEW FILE########
__FILENAME__ = license
# vi:sts=4:sw=4:et
"""Code for parsing OpenEmbedded license strings"""
import ast
import re
from fnmatch import fnmatchcase as fnmatch
class LicenseError(Exception):
pass
class LicenseSyntaxError(LicenseError):
def __init__(self, licensestr, exc):
self.licensestr = licensestr
self.exc = exc
LicenseError.__init__(self)
def __str__(self):
return "error in '%s': %s" % (self.licensestr, self.exc)
class InvalidLicense(LicenseError):
def __init__(self, license):
self.license = license
LicenseError.__init__(self)
def __str__(self):
return "invalid characters in license '%s'" % self.license
license_operator = re.compile('([&|() ])')
license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
class LicenseVisitor(ast.NodeVisitor):
"""Syntax tree visitor which can accept OpenEmbedded license strings"""
def visit_string(self, licensestr):
new_elements = []
elements = filter(lambda x: x.strip(), license_operator.split(licensestr))
for pos, element in enumerate(elements):
if license_pattern.match(element):
if pos > 0 and license_pattern.match(elements[pos-1]):
new_elements.append('&')
element = '"' + element + '"'
elif not license_operator.match(element):
raise InvalidLicense(element)
new_elements.append(element)
self.visit(ast.parse(' '.join(new_elements)))
class FlattenVisitor(LicenseVisitor):
"""Flatten a license tree (parsed from a string) by selecting one of each
set of OR options, in the way the user specifies"""
def __init__(self, choose_licenses):
self.choose_licenses = choose_licenses
self.licenses = []
LicenseVisitor.__init__(self)
def visit_Str(self, node):
self.licenses.append(node.s)
def visit_BinOp(self, node):
if isinstance(node.op, ast.BitOr):
left = FlattenVisitor(self.choose_licenses)
left.visit(node.left)
right = FlattenVisitor(self.choose_licenses)
right.visit(node.right)
selected = self.choose_licenses(left.licenses, right.licenses)
self.licenses.extend(selected)
else:
self.generic_visit(node)
def flattened_licenses(licensestr, choose_licenses):
"""Given a license string and choose_licenses function, return a flat list of licenses"""
flatten = FlattenVisitor(choose_licenses)
try:
flatten.visit_string(licensestr)
except SyntaxError as exc:
raise LicenseSyntaxError(licensestr, exc)
return flatten.licenses
def is_included(licensestr, whitelist=None, blacklist=None):
"""Given a license string and whitelist and blacklist, determine if the
license string matches the whitelist and does not match the blacklist.
Returns a tuple holding the boolean state and a list of the applicable
licenses which were excluded (or None, if the state is True)
"""
def include_license(license):
return any(fnmatch(license, pattern) for pattern in whitelist)
def exclude_license(license):
return any(fnmatch(license, pattern) for pattern in blacklist)
def choose_licenses(alpha, beta):
"""Select the option in an OR which is the 'best' (has the most
included licenses)."""
alpha_weight = len(filter(include_license, alpha))
beta_weight = len(filter(include_license, beta))
if alpha_weight > beta_weight:
return alpha
else:
return beta
if not whitelist:
whitelist = ['*']
if not blacklist:
blacklist = []
licenses = flattened_licenses(licensestr, choose_licenses)
excluded = filter(lambda lic: exclude_license(lic), licenses)
included = filter(lambda lic: include_license(lic), licenses)
if excluded:
return False, excluded
else:
return True, included
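# Hedged sketch (added for illustration, not part of the original module):
# with only GPLv2 whitelisted, the OR in the license string is flattened
# to the branch with the most included licenses. The license names are
# example values. Never called from within this module.
def _example_is_included():
    # Evaluates to (True, ['GPLv2']): the GPLv2 branch wins the OR.
    return is_included("GPLv2 | Proprietary", whitelist=["GPLv2"])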
########NEW FILE########
__FILENAME__ = lsb
def release_dict():
"""Return the output of lsb_release -ir as a dictionary"""
from subprocess import PIPE
try:
output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
except bb.process.CmdError as exc:
return None
data = {}
for line in output.splitlines():
try:
key, value = line.split(":\t", 1)
except ValueError:
continue
else:
data[key] = value
return data
def release_dict_file():
""" Try to gather LSB release information manually when lsb_release tool is unavailable """
data = None
try:
if os.path.exists('/etc/lsb-release'):
data = {}
with open('/etc/lsb-release') as f:
for line in f:
key, value = line.split("=", 1)
data[key] = value.strip()
elif os.path.exists('/etc/redhat-release'):
data = {}
with open('/etc/redhat-release') as f:
distro = f.readline().strip()
import re
match = re.match(r'(.*) release (.*) \((.*)\)', distro)
if match:
data['DISTRIB_ID'] = match.group(1)
data['DISTRIB_RELEASE'] = match.group(2)
elif os.path.exists('/etc/SuSE-release'):
data = {}
data['DISTRIB_ID'] = 'SUSE LINUX'
with open('/etc/SuSE-release') as f:
for line in f:
if line.startswith('VERSION = '):
data['DISTRIB_RELEASE'] = line[10:].rstrip()
break
elif os.path.exists('/etc/os-release'):
data = {}
with open('/etc/os-release') as f:
for line in f:
if line.startswith('NAME='):
data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
if line.startswith('VERSION_ID='):
data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
except IOError:
return None
return data
def distro_identifier(adjust_hook=None):
"""Return a distro identifier string based upon lsb_release -ri,
with optional adjustment via a hook"""
lsb_data = release_dict()
if lsb_data:
distro_id, release = lsb_data['Distributor ID'], lsb_data['Release']
else:
lsb_data_file = release_dict_file()
if lsb_data_file:
distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None)
else:
distro_id, release = None, None
if adjust_hook:
distro_id, release = adjust_hook(distro_id, release)
if not distro_id:
return "Unknown"
if release:
id_str = '{0}-{1}'.format(distro_id, release)
else:
id_str = distro_id
return id_str.replace(' ','-').replace('/','-')
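# Hedged usage sketch (added for illustration, not part of the original
# module): an adjust_hook can normalize the detected distro before the
# identifier string is built. Collapsing the release is just an example
# policy. Never called from within this module.
def _example_adjust_hook(distro_id, release):
    if distro_id == 'Ubuntu':
        release = None          # e.g. treat all Ubuntu releases the same
    return distro_id, release
# distro_identifier(_example_adjust_hook) would then return just "Ubuntu"
# on any Ubuntu host.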
########NEW FILE########
__FILENAME__ = maketype
"""OpenEmbedded variable typing support
Types are defined in the metadata by name, using the 'type' flag on a
variable. Other flags may be utilized in the construction of the types. See
the arguments of the type's factory for details.
"""
import inspect
import types
available_types = {}
class MissingFlag(TypeError):
"""A particular flag is required to construct the type, but has not been
provided."""
def __init__(self, flag, type):
self.flag = flag
self.type = type
TypeError.__init__(self)
def __str__(self):
return "Type '%s' requires flag '%s'" % (self.type, self.flag)
def factory(var_type):
"""Return the factory for a specified type."""
if var_type is None:
raise TypeError("No type specified. Valid types: %s" %
', '.join(available_types))
try:
return available_types[var_type]
except KeyError:
raise TypeError("Invalid type '%s':\n Valid types: %s" %
(var_type, ', '.join(available_types)))
def create(value, var_type, **flags):
"""Create an object of the specified type, given the specified flags and
string value."""
obj = factory(var_type)
objflags = {}
for flag in obj.flags:
if flag not in flags:
if flag not in obj.optflags:
raise MissingFlag(flag, var_type)
else:
objflags[flag] = flags[flag]
return obj(value, **objflags)
def get_callable_args(obj):
"""Grab all but the first argument of the specified callable, returning
the list, as well as a list of which of the arguments have default
values."""
if type(obj) is type:
obj = obj.__init__
args, varargs, keywords, defaults = inspect.getargspec(obj)
flaglist = []
if args:
if len(args) > 1 and args[0] == 'self':
args = args[1:]
flaglist.extend(args)
optional = set()
if defaults:
optional |= set(flaglist[-len(defaults):])
return flaglist, optional
def factory_setup(name, obj):
"""Prepare a factory for use."""
args, optional = get_callable_args(obj)
extra_args = args[1:]
if extra_args:
obj.flags, optional = extra_args, optional
obj.optflags = set(optional)
else:
obj.flags = obj.optflags = ()
if not hasattr(obj, 'name'):
obj.name = name
def register(name, factory):
"""Register a type, given its name and a factory callable.
Determines the required and optional flags from the factory's
arguments."""
factory_setup(name, factory)
available_types[factory.name] = factory
# Register all our included types
for name in dir(types):
if name.startswith('_'):
continue
obj = getattr(types, name)
if not callable(obj):
continue
register(name, obj)
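# Hedged usage sketch (added for illustration, not part of the original
# module): assuming the companion 'types' module registered above provides
# a 'boolean' factory, a metadata value flagged type = "boolean" would be
# constructed roughly like this. Never called from within this module.
def _example_create():
    return create('true', 'boolean')   # -> True, if such a factory exists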
########NEW FILE########
__FILENAME__ = manifest
from abc import ABCMeta, abstractmethod
import os
import re
import bb
class Manifest(object):
"""
This is an abstract class. Do not instantiate this directly.
"""
__metaclass__ = ABCMeta
PKG_TYPE_MUST_INSTALL = "mip"
PKG_TYPE_MULTILIB = "mlp"
PKG_TYPE_LANGUAGE = "lgp"
PKG_TYPE_ATTEMPT_ONLY = "aop"
MANIFEST_TYPE_IMAGE = "image"
MANIFEST_TYPE_SDK_HOST = "sdk_host"
MANIFEST_TYPE_SDK_TARGET = "sdk_target"
var_maps = {
MANIFEST_TYPE_IMAGE: {
"PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
"PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
"LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
},
MANIFEST_TYPE_SDK_HOST: {
"TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
"TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
},
MANIFEST_TYPE_SDK_TARGET: {
"TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
"TOOLCHAIN_TARGET_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
}
}
INSTALL_ORDER = [
PKG_TYPE_LANGUAGE,
PKG_TYPE_MUST_INSTALL,
PKG_TYPE_ATTEMPT_ONLY,
PKG_TYPE_MULTILIB
]
initial_manifest_file_header = \
"# This file was generated automatically and contains the packages\n" \
"# passed on to the package manager in order to create the rootfs.\n\n" \
"# Format:\n" \
"# <package_type>,<package_name>\n" \
"# where:\n" \
"# <package_type> can be:\n" \
"# 'mip' = must install package\n" \
"# 'aop' = attempt only package\n" \
"# 'mlp' = multilib package\n" \
"# 'lgp' = language package\n\n"
def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
self.d = d
self.manifest_type = manifest_type
if manifest_dir is None:
if manifest_type != self.MANIFEST_TYPE_IMAGE:
self.manifest_dir = self.d.getVar('SDK_DIR', True)
else:
self.manifest_dir = self.d.getVar('WORKDIR', True)
else:
self.manifest_dir = manifest_dir
bb.utils.mkdirhier(self.manifest_dir)
self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)
# packages in the following vars will be split in 'must install' and
# 'multilib'
self.vars_to_split = ["PACKAGE_INSTALL",
"TOOLCHAIN_HOST_TASK",
"TOOLCHAIN_TARGET_TASK"]
"""
This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
This will be used for testing until the class is implemented properly!
"""
def _create_dummy_initial(self):
image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
pkg_list = dict()
if image_rootfs.find("core-image-sato-sdk") > 0:
pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
"packagegroup-core-x11-sato-games packagegroup-base-extended " \
"packagegroup-core-x11-sato packagegroup-core-x11-base " \
"packagegroup-core-sdk packagegroup-core-tools-debug " \
"packagegroup-core-boot packagegroup-core-tools-testapps " \
"packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
"apt packagegroup-core-tools-profile psplash " \
"packagegroup-core-standalone-sdk-target " \
"packagegroup-core-ssh-openssh dpkg kernel-dev"
pkg_list[self.PKG_TYPE_LANGUAGE] = \
"locale-base-en-us locale-base-en-gb"
elif image_rootfs.find("core-image-sato") > 0:
pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
"packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
"packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
"packagegroup-core-x11-sato packagegroup-core-boot"
            pkg_list[self.PKG_TYPE_LANGUAGE] = \
                "locale-base-en-us locale-base-en-gb"
elif image_rootfs.find("core-image-minimal") > 0:
pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"
with open(self.initial_manifest, "w+") as manifest:
manifest.write(self.initial_manifest_file_header)
for pkg_type in pkg_list:
for pkg in pkg_list[pkg_type].split():
manifest.write("%s,%s\n" % (pkg_type, pkg))
"""
This will create the initial manifest which will be used by Rootfs class to
generate the rootfs
"""
@abstractmethod
def create_initial(self):
pass
"""
This creates the manifest after everything has been installed.
"""
@abstractmethod
def create_final(self):
pass
"""
This creates the manifest after the package in initial manifest has been
dummy installed. It lists all *to be installed* packages. There is no real
installation, just a test.
"""
@abstractmethod
def create_full(self, pm):
pass
"""
The following function parses an initial manifest and returns a dictionary
object with the must install, attempt only, multilib and language packages.
"""
def parse_initial_manifest(self):
pkgs = dict()
with open(self.initial_manifest) as manifest:
for line in manifest.read().split('\n'):
comment = re.match("^#.*", line)
pattern = "^(%s|%s|%s|%s),(.*)$" % \
(self.PKG_TYPE_MUST_INSTALL,
self.PKG_TYPE_ATTEMPT_ONLY,
self.PKG_TYPE_MULTILIB,
self.PKG_TYPE_LANGUAGE)
pkg = re.match(pattern, line)
if comment is not None:
continue
if pkg is not None:
pkg_type = pkg.group(1)
pkg_name = pkg.group(2)
if not pkg_type in pkgs:
pkgs[pkg_type] = [pkg_name]
else:
pkgs[pkg_type].append(pkg_name)
return pkgs
    '''
    The following function parses a full manifest and returns a list
    of the packages it contains.
    '''
def parse_full_manifest(self):
installed_pkgs = list()
if not os.path.exists(self.full_manifest):
            bb.note('full manifest does not exist')
return installed_pkgs
with open(self.full_manifest, 'r') as manifest:
for pkg in manifest.read().split('\n'):
installed_pkgs.append(pkg.strip())
return installed_pkgs
class RpmManifest(Manifest):
"""
Returns a dictionary object with mip and mlp packages.
"""
def _split_multilib(self, pkg_list):
pkgs = dict()
for pkg in pkg_list.split():
pkg_type = self.PKG_TYPE_MUST_INSTALL
ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
for ml_variant in ml_variants:
if pkg.startswith(ml_variant + '-'):
pkg_type = self.PKG_TYPE_MULTILIB
if not pkg_type in pkgs:
pkgs[pkg_type] = pkg
else:
pkgs[pkg_type] += " " + pkg
return pkgs
def create_initial(self):
pkgs = dict()
with open(self.initial_manifest, "w+") as manifest:
manifest.write(self.initial_manifest_file_header)
for var in self.var_maps[self.manifest_type]:
if var in self.vars_to_split:
split_pkgs = self._split_multilib(self.d.getVar(var, True))
if split_pkgs is not None:
pkgs = dict(pkgs.items() + split_pkgs.items())
else:
pkg_list = self.d.getVar(var, True)
if pkg_list is not None:
pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
for pkg_type in pkgs:
for pkg in pkgs[pkg_type].split():
manifest.write("%s,%s\n" % (pkg_type, pkg))
def create_final(self):
pass
def create_full(self, pm):
pass
class OpkgManifest(Manifest):
"""
Returns a dictionary object with mip and mlp packages.
"""
def _split_multilib(self, pkg_list):
pkgs = dict()
for pkg in pkg_list.split():
pkg_type = self.PKG_TYPE_MUST_INSTALL
ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
for ml_variant in ml_variants:
if pkg.startswith(ml_variant + '-'):
pkg_type = self.PKG_TYPE_MULTILIB
if not pkg_type in pkgs:
pkgs[pkg_type] = pkg
else:
pkgs[pkg_type] += " " + pkg
return pkgs
def create_initial(self):
pkgs = dict()
with open(self.initial_manifest, "w+") as manifest:
manifest.write(self.initial_manifest_file_header)
for var in self.var_maps[self.manifest_type]:
if var in self.vars_to_split:
split_pkgs = self._split_multilib(self.d.getVar(var, True))
if split_pkgs is not None:
pkgs = dict(pkgs.items() + split_pkgs.items())
else:
pkg_list = self.d.getVar(var, True)
if pkg_list is not None:
pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
for pkg_type in pkgs:
for pkg in pkgs[pkg_type].split():
manifest.write("%s,%s\n" % (pkg_type, pkg))
def create_final(self):
pass
def create_full(self, pm):
if not os.path.exists(self.initial_manifest):
self.create_initial()
initial_manifest = self.parse_initial_manifest()
pkgs_to_install = list()
for pkg_type in initial_manifest:
pkgs_to_install += initial_manifest[pkg_type]
if len(pkgs_to_install) == 0:
return
output = pm.dummy_install(pkgs_to_install)
with open(self.full_manifest, 'w+') as manifest:
pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
for line in set(output.split('\n')):
m = pkg_re.match(line)
if m:
manifest.write(m.group(1) + '\n')
return
class DpkgManifest(Manifest):
def create_initial(self):
with open(self.initial_manifest, "w+") as manifest:
manifest.write(self.initial_manifest_file_header)
for var in self.var_maps[self.manifest_type]:
pkg_list = self.d.getVar(var, True)
if pkg_list is None:
continue
for pkg in pkg_list.split():
manifest.write("%s,%s\n" %
(self.var_maps[self.manifest_type][var], pkg))
def create_final(self):
pass
def create_full(self, pm):
pass
def create_manifest(d, final_manifest=False, manifest_dir=None,
manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
manifest_map = {'rpm': RpmManifest,
'ipk': OpkgManifest,
'deb': DpkgManifest}
manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type)
if final_manifest:
manifest.create_final()
else:
manifest.create_initial()
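# Hedged usage sketch (added for illustration, not part of the original
# module): with a bitbake datastore 'd', this writes
# "<manifest_type>_initial_manifest" (e.g. image_initial_manifest) into
# WORKDIR for the configured IMAGE_PKGTYPE. Never called from here.
def _example_create_manifest(d):
    create_manifest(d, final_manifest=False,
                    manifest_type=Manifest.MANIFEST_TYPE_IMAGE)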
if __name__ == "__main__":
pass
########NEW FILE########
__FILENAME__ = package
def runstrip(arg):
# Function to strip a single file, called from split_and_strip_files below
# A working 'file' (one which works on the target architecture)
#
# The elftype is a bit pattern (explained in split_and_strip_files) to tell
# us what type of file we're processing...
# 4 - executable
# 8 - shared library
# 16 - kernel module
import commands, stat, subprocess
(file, elftype, strip) = arg
newmode = None
    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
origmode = os.stat(file)[stat.ST_MODE]
newmode = origmode | stat.S_IWRITE | stat.S_IREAD
os.chmod(file, newmode)
extraflags = ""
# kernel module
if elftype & 16:
extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
# .so and shared library
elif ".so" in file and elftype & 8:
extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
# shared or executable:
elif elftype & 8 or elftype & 4:
extraflags = "--remove-section=.comment --remove-section=.note"
stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
bb.debug(1, "runstrip: %s" % stripcmd)
ret = subprocess.call(stripcmd, shell=True)
if newmode:
os.chmod(file, origmode)
if ret:
bb.error("runstrip: '%s' strip command failed" % stripcmd)
return
def file_translate(file):
ft = file.replace("@", "@at@")
ft = ft.replace(" ", "@space@")
ft = ft.replace("\t", "@tab@")
ft = ft.replace("[", "@openbrace@")
ft = ft.replace("]", "@closebrace@")
ft = ft.replace("_", "@underscore@")
return ft
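# Illustration (added, not part of the original module) of the escaping
# performed by file_translate() before file names are used as RPM
# dependency keys; the path is an arbitrary example. Never called here.
def _example_file_translate():
    # -> "/usr/lib/foo@space@bar@openbrace@1@closebrace@"
    return file_translate("/usr/lib/foo bar[1]")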
def filedeprunner(arg):
import re, subprocess, shlex
(pkg, pkgfiles, rpmdeps, pkgdest) = arg
provides = {}
requires = {}
r = re.compile(r'[<>=]+ +[^ ]*')
def process_deps(pipe, pkg, pkgdest, provides, requires):
for line in pipe:
f = line.split(" ", 1)[0].strip()
line = line.split(" ", 1)[1].strip()
if line.startswith("Requires:"):
i = requires
elif line.startswith("Provides:"):
i = provides
else:
continue
file = f.replace(pkgdest + "/" + pkg, "")
file = file_translate(file)
value = line.split(":", 1)[1].strip()
value = r.sub(r'(\g<0>)', value)
if value.startswith("rpmlib("):
continue
if value == "python":
continue
if file not in i:
i[file] = []
i[file].append(value)
return provides, requires
try:
dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
except OSError as e:
bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
raise e
return (pkg, provides, requires)
########NEW FILE########
__FILENAME__ = packagedata
import codecs
def packaged(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
def read_pkgdatafile(fn):
pkgdata = {}
    def decode(s):
        c = codecs.getdecoder("string_escape")
        return c(s)[0]
if os.access(fn, os.R_OK):
import re
f = open(fn, 'r')
lines = f.readlines()
f.close()
r = re.compile("([^:]+):\s*(.*)")
for l in lines:
m = r.match(l)
if m:
pkgdata[m.group(1)] = decode(m.group(2))
return pkgdata
def get_subpkgedata_fn(pkg, d):
return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)
def has_subpkgdata(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
def read_subpkgdata(pkg, d):
return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
def has_pkgdata(pn, d):
fn = d.expand('${PKGDATA_DIR}/%s' % pn)
return os.access(fn, os.R_OK)
def read_pkgdata(pn, d):
fn = d.expand('${PKGDATA_DIR}/%s' % pn)
return read_pkgdatafile(fn)
#
# Collapse FOO_pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
ret = {}
subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
for var in subd:
newvar = var.replace("_" + pkg, "")
if newvar == var and var + "_" + pkg in subd:
continue
ret[newvar] = subd[var]
return ret
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
pkgdatadir = d.getVar("PKGDATA_DIR", True)
pkgmap = {}
try:
files = os.listdir(pkgdatadir)
except OSError:
bb.warn("No files in %s?" % pkgdatadir)
files = []
for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
try:
pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
except OSError:
continue
packages = pkgdata.get("PACKAGES") or ""
for pkg in packages.split():
pkgmap[pkg] = pn
return pkgmap
def pkgmap(d):
"""Return a dictionary mapping package to recipe name.
Cache the mapping in the metadata"""
pkgmap_data = d.getVar("__pkgmap_data", False)
if pkgmap_data is None:
pkgmap_data = _pkgmap(d)
d.setVar("__pkgmap_data", pkgmap_data)
return pkgmap_data
def recipename(pkg, d):
"""Return the recipe name for the given binary package name."""
return pkgmap(d).get(pkg)
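# Hedged usage sketch (added for illustration, not part of the original
# module): mapping a binary package back to the recipe that produced it,
# using the PKGDATA_DIR files parsed above. 'libz1' is a hypothetical
# package name. Never called from within this module.
def _example_recipename(d):
    return recipename('libz1', d)   # e.g. -> 'zlib'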
########NEW FILE########
__FILENAME__ = packagegroup
import itertools
def is_optional(feature, d):
packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
if packages:
return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
else:
return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
def packages(features, d):
for feature in features:
packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
if not packages:
packages = d.getVar("PACKAGE_GROUP_%s" % feature, True)
for pkg in (packages or "").split():
yield pkg
def required_packages(features, d):
req = filter(lambda feature: not is_optional(feature, d), features)
return packages(req, d)
def optional_packages(features, d):
opt = filter(lambda feature: is_optional(feature, d), features)
return packages(opt, d)
def active_packages(features, d):
return itertools.chain(required_packages(features, d),
optional_packages(features, d))
def active_recipes(features, d):
import oe.packagedata
for pkg in active_packages(features, d):
recipe = oe.packagedata.recipename(pkg, d)
if recipe:
yield recipe
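# Hedged usage sketch (added for illustration, not part of the original
# module): expanding a feature into its package list; the feature name is
# an example value and FEATURE_PACKAGES_<feature> (or the legacy
# PACKAGE_GROUP_<feature>) must be set in 'd'. Never called from here.
def _example_active_packages(d):
    return list(active_packages(["ssh-server-openssh"], d))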
########NEW FILE########
__FILENAME__ = package_manager
from abc import ABCMeta, abstractmethod
import os
import glob
import subprocess
import shutil
import multiprocessing
import re
import bb
import tempfile
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
index_cmd = arg
try:
bb.note("Executing '%s' ..." % index_cmd)
subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
return("Index creation command '%s' failed with return code %d:\n%s" %
(e.cmd, e.returncode, e.output))
return None
class Indexer(object):
__metaclass__ = ABCMeta
def __init__(self, d, deploy_dir):
self.d = d
self.deploy_dir = deploy_dir
@abstractmethod
def write_index(self):
pass
class RpmIndexer(Indexer):
def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
package_archs = {
'default': [],
}
target_os = {
'default': "",
}
if arch_var is not None and os_var is not None:
package_archs['default'] = self.d.getVar(arch_var, True).split()
package_archs['default'].reverse()
target_os['default'] = self.d.getVar(os_var, True).strip()
else:
package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
# arch order is reversed. This ensures the -best- match is
# listed first!
package_archs['default'].reverse()
target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
multilibs = self.d.getVar('MULTILIBS', True) or ""
for ext in multilibs.split():
eext = ext.split(':')
if len(eext) > 1 and eext[0] == 'multilib':
localdata = bb.data.createCopy(self.d)
default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
default_tune = localdata.getVar(default_tune_key, False)
if default_tune:
localdata.setVar("DEFAULTTUNE", default_tune)
bb.data.update_data(localdata)
package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
True).split()
package_archs[eext[1]].reverse()
target_os[eext[1]] = localdata.getVar("TARGET_OS",
True).strip()
ml_prefix_list = dict()
for mlib in package_archs:
if mlib == 'default':
ml_prefix_list[mlib] = package_archs[mlib]
else:
ml_prefix_list[mlib] = list()
for arch in package_archs[mlib]:
if arch in ['all', 'noarch', 'any']:
ml_prefix_list[mlib].append(arch)
else:
ml_prefix_list[mlib].append(mlib + "_" + arch)
return (ml_prefix_list, target_os)
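    # Illustrative return value (hypothetical lib32 multilib on an x86-64
    # machine; the exact arch names depend on the tune configuration):
    #   ml_prefix_list = {'default': ['core2-64', 'x86_64', ...],
    #                     'lib32': ['lib32_core2-32', 'lib32_i586', ...]}
    #   target_os = {'default': 'linux', 'lib32': 'linux'}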
def write_index(self):
sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
archs = set()
for item in mlb_prefix_list:
archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
if len(archs) == 0:
archs = archs.union(set(all_mlb_pkg_archs))
archs = archs.union(set(sdk_pkg_archs))
rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
index_cmds = []
rpm_dirs_found = False
for arch in archs:
arch_dir = os.path.join(self.deploy_dir, arch)
if not os.path.isdir(arch_dir):
continue
index_cmds.append("%s --update -q %s" % (rpm_createrepo, arch_dir))
rpm_dirs_found = True
if not rpm_dirs_found:
bb.note("There are no packages in %s" % self.deploy_dir)
return
nproc = multiprocessing.cpu_count()
pool = bb.utils.multiprocessingpool(nproc)
results = list(pool.imap(create_index, index_cmds))
pool.close()
pool.join()
for result in results:
if result is not None:
return(result)
class OpkgIndexer(Indexer):
def write_index(self):
arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
"SDK_PACKAGE_ARCHS",
"MULTILIB_ARCHS"]
opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
open(os.path.join(self.deploy_dir, "Packages"), "w").close()
index_cmds = []
for arch_var in arch_vars:
archs = self.d.getVar(arch_var, True)
if archs is None:
continue
for arch in archs.split():
pkgs_dir = os.path.join(self.deploy_dir, arch)
pkgs_file = os.path.join(pkgs_dir, "Packages")
if not os.path.isdir(pkgs_dir):
continue
if not os.path.exists(pkgs_file):
open(pkgs_file, "w").close()
index_cmds.append('%s -r %s -p %s -m %s' %
(opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
if len(index_cmds) == 0:
bb.note("There are no packages in %s!" % self.deploy_dir)
return
nproc = multiprocessing.cpu_count()
pool = bb.utils.multiprocessingpool(nproc)
results = list(pool.imap(create_index, index_cmds))
pool.close()
pool.join()
for result in results:
if result is not None:
return(result)
class DpkgIndexer(Indexer):
def write_index(self):
pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
if pkg_archs is not None:
arch_list = pkg_archs.split()
sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
if sdk_pkg_archs is not None:
for a in sdk_pkg_archs.split():
if a not in pkg_archs:
arch_list.append(a)
apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
gzip = bb.utils.which(os.getenv('PATH'), "gzip")
index_cmds = []
deb_dirs_found = False
for arch in arch_list:
arch_dir = os.path.join(self.deploy_dir, arch)
if not os.path.isdir(arch_dir):
continue
cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
cmd += "%s -fc Packages > Packages.gz;" % gzip
with open(os.path.join(arch_dir, "Release"), "w+") as release:
release.write("Label: %s\n" % arch)
cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
index_cmds.append(cmd)
deb_dirs_found = True
if not deb_dirs_found:
bb.note("There are no packages in %s" % self.deploy_dir)
return
nproc = multiprocessing.cpu_count()
pool = bb.utils.multiprocessingpool(nproc)
results = list(pool.imap(create_index, index_cmds))
pool.close()
pool.join()
for result in results:
if result is not None:
return(result)
class PkgsList(object):
__metaclass__ = ABCMeta
def __init__(self, d, rootfs_dir):
self.d = d
self.rootfs_dir = rootfs_dir
@abstractmethod
def list(self, format=None):
pass
class RpmPkgsList(PkgsList):
def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
super(RpmPkgsList, self).__init__(d, rootfs_dir)
self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
self.ml_prefix_list, self.ml_os_list = \
RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
'''
Translate the RPM/Smart format names to the OE multilib format names
'''
def _pkg_translate_smart_to_oe(self, pkg, arch):
new_pkg = pkg
fixed_arch = arch.replace('_', '-')
found = 0
for mlib in self.ml_prefix_list:
for cmp_arch in self.ml_prefix_list[mlib]:
fixed_cmp_arch = cmp_arch.replace('_', '-')
if fixed_arch == fixed_cmp_arch:
if mlib == 'default':
new_pkg = pkg
new_arch = cmp_arch
else:
new_pkg = mlib + '-' + pkg
# We need to strip off the ${mlib}_ prefix on the arch
new_arch = cmp_arch.replace(mlib + '_', '')
# Workaround for bug 3565. Simply look to see if we
# know of a package with that name, if not try again!
filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
'runtime-reverse',
new_pkg)
if os.path.exists(filename):
found = 1
break
if found == 1 and fixed_arch == fixed_cmp_arch:
break
#bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
return new_pkg, new_arch
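    # Illustrative translation (hypothetical lib32 multilib): the pair
    # ('bash', 'lib32_core2_32') becomes ('lib32-bash', 'core2-32'),
    # provided a 'lib32-bash' entry exists under
    # ${PKGDATA_DIR}/runtime-reverse.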
def _list_pkg_deps(self):
cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
"-t", self.image_rpmlib]
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the package dependencies. Command '%s' "
"returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
return output
def list(self, format=None):
if format == "deps":
return self._list_pkg_deps()
cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
cmd += ' -D "_dbpath /var/lib/rpm" -qa'
cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
try:
# bb.note(cmd)
tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
output = list()
for line in tmp_output.split('\n'):
if len(line.strip()) == 0:
continue
pkg = line.split()[0]
arch = line.split()[1]
ver = line.split()[2]
pkgorigin = line.split()[3]
new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
if format == "arch":
output.append('%s %s' % (new_pkg, new_arch))
elif format == "file":
output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch))
elif format == "ver":
output.append('%s %s %s' % (new_pkg, new_arch, ver))
else:
output.append('%s' % (new_pkg))
output.sort()
return '\n'.join(output)
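# Illustrative RpmPkgsList.list() output, one package per line (values are
# hypothetical):
#   format=None   -> "bash"
#   format="arch" -> "bash core2-64"
#   format="ver"  -> "bash core2-64 4.2"
#   format="file" -> "bash <PACKAGEORIGIN> core2-64"
#   format="deps" -> raw rpmresolve dependency output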
class OpkgPkgsList(PkgsList):
def __init__(self, d, rootfs_dir, config_file):
super(OpkgPkgsList, self).__init__(d, rootfs_dir)
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
self.opkg_args += self.d.getVar("OPKG_ARGS", True)
def list(self, format=None):
opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
if format == "arch":
cmd = "%s %s status | %s -a" % \
(self.opkg_cmd, self.opkg_args, opkg_query_cmd)
elif format == "file":
cmd = "%s %s status | %s -f" % \
(self.opkg_cmd, self.opkg_args, opkg_query_cmd)
elif format == "ver":
cmd = "%s %s status | %s -v" % \
(self.opkg_cmd, self.opkg_args, opkg_query_cmd)
elif format == "deps":
cmd = "%s %s status | %s" % \
(self.opkg_cmd, self.opkg_args, opkg_query_cmd)
else:
cmd = "%s %s list_installed | cut -d' ' -f1" % \
(self.opkg_cmd, self.opkg_args)
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
if output and format == "file":
tmp_output = ""
for line in output.split('\n'):
pkg, pkg_file, pkg_arch = line.split()
full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
if os.path.exists(full_path):
tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
else:
tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
output = tmp_output
return output
class DpkgPkgsList(PkgsList):
def list(self, format=None):
cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
"--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
"-W"]
if format == "arch":
cmd.append("-f=${Package} ${PackageArch}\n")
elif format == "file":
cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
elif format == "ver":
cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
elif format == "deps":
cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
else:
cmd.append("-f=${Package}\n")
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
if format == "file":
tmp_output = ""
for line in tuple(output.split('\n')):
pkg, pkg_file, pkg_arch = line.split()
full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
if os.path.exists(full_path):
tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
else:
tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
output = tmp_output
elif format == "deps":
opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
file_out = tempfile.NamedTemporaryFile()
file_out.write(output)
file_out.flush()
try:
output = subprocess.check_output("cat %s | %s" %
(file_out.name, opkg_query_cmd),
stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as e:
file_out.close()
bb.fatal("Cannot compute packages dependencies. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output))
file_out.close()
return output
class PackageManager(object):
"""
This is an abstract class. Do not instantiate this directly.
"""
__metaclass__ = ABCMeta
def __init__(self, d):
self.d = d
self.deploy_dir = None
self.deploy_lock = None
self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
"""
Update the package manager package database.
"""
@abstractmethod
def update(self):
pass
"""
Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
True, installation failures are ignored.
"""
@abstractmethod
def install(self, pkgs, attempt_only=False):
pass
"""
Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
    is False, any dependencies are left in place.
"""
@abstractmethod
def remove(self, pkgs, with_dependencies=True):
pass
"""
This function creates the index files
"""
@abstractmethod
def write_index(self):
pass
@abstractmethod
def remove_packaging_data(self):
pass
@abstractmethod
def list_installed(self, format=None):
pass
@abstractmethod
def insert_feeds_uris(self):
pass
"""
Install complementary packages based upon the list of currently installed
    packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
    these packages; if they don't exist, no error will occur. Note: every
    backend needs to call this function explicitly after the normal package
    installation.
"""
def install_complementary(self, globs=None):
# we need to write the list of installed packages to a file because the
# oe-pkgdata-util reads it from a file
installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
"installed_pkgs.txt")
with open(installed_pkgs_file, "w+") as installed_pkgs:
installed_pkgs.write(self.list_installed("arch"))
if globs is None:
globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
split_linguas = set()
for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
split_linguas.add(translation)
split_linguas.add(translation.split('-')[0])
split_linguas = sorted(split_linguas)
for lang in split_linguas:
globs += " *-locale-%s" % lang
if globs is None:
return
cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
"glob", self.d.getVar('PKGDATA_DIR', True), installed_pkgs_file,
globs]
try:
bb.note("Installing complementary packages ...")
complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Could not compute complementary packages list. Command "
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output))
self.install(complementary_pkgs.split(), attempt_only=True)
def deploy_dir_lock(self):
if self.deploy_dir is None:
raise RuntimeError("deploy_dir is not set!")
lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
self.deploy_lock = bb.utils.lockfile(lock_file_name)
def deploy_dir_unlock(self):
if self.deploy_lock is None:
return
bb.utils.unlockfile(self.deploy_lock)
self.deploy_lock = None
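# The lock/unlock pair is meant to bracket any read-modify-write of the
# deploy directory; a sketch of the pattern the backends below follow
# (where 'pm' is any concrete PackageManager):
#
#   pm.deploy_dir_lock()
#   try:
#       pass  # touch files under pm.deploy_dir
#   finally:
#       pm.deploy_dir_unlock()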
class RpmPM(PackageManager):
def __init__(self,
d,
target_rootfs,
target_vendor,
task_name='target',
providename=None,
arch_var=None,
os_var=None):
super(RpmPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.target_vendor = target_vendor
self.task_name = task_name
self.providename = providename
self.fullpkglist = list()
self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
self.install_dir = os.path.join(self.target_rootfs, "install")
self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
self.smart_opt = "--data-dir=" + os.path.join(target_rootfs,
'var/lib/smart')
self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
self.task_name)
self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
self.indexer = RpmIndexer(self.d, self.deploy_dir)
self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
def insert_feeds_uris(self):
if self.feed_uris == "":
return
        # List must be in most-preferred to least-preferred order
default_platform_extra = set()
platform_extra = set()
bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
for mlib in self.ml_os_list:
for arch in self.ml_prefix_list[mlib]:
plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
if mlib == bbextendvariant:
default_platform_extra.add(plt)
else:
platform_extra.add(plt)
platform_extra = platform_extra.union(default_platform_extra)
arch_list = []
for canonical_arch in platform_extra:
arch = canonical_arch.split('-')[0]
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
uri_iterator = 0
channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list)
for uri in self.feed_uris.split():
for arch in arch_list:
                bb.note('Note: adding Smart channel url%d-%s (%s)' %
(uri_iterator, arch, channel_priority))
self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y'
% (uri_iterator, arch, uri, arch))
self._invoke_smart('channel --set url%d-%s priority=%d' %
(uri_iterator, arch, channel_priority))
channel_priority -= 5
uri_iterator += 1
'''
    Create configs for rpm and smart, with multilib support
'''
def create_configs(self):
target_arch = self.d.getVar('TARGET_ARCH', True)
platform = '%s%s-%s' % (target_arch.replace('-', '_'),
self.target_vendor,
self.ml_os_list['default'])
        # List must be in most-preferred to least-preferred order
default_platform_extra = list()
platform_extra = list()
bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
for mlib in self.ml_os_list:
for arch in self.ml_prefix_list[mlib]:
plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
if mlib == bbextendvariant:
if plt not in default_platform_extra:
default_platform_extra.append(plt)
else:
if plt not in platform_extra:
platform_extra.append(plt)
platform_extra = default_platform_extra + platform_extra
self._create_configs(platform, platform_extra)
def _invoke_smart(self, args):
cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
# bb.note(cmd)
try:
complementary_pkgs = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True)
# bb.note(complementary_pkgs)
return complementary_pkgs
except subprocess.CalledProcessError as e:
bb.fatal("Could not invoke smart. Command "
"'%s' returned %d:\n%s" % (cmd, e.returncode, e.output))
def _search_pkg_name_in_feeds(self, pkg, feed_archs):
for arch in feed_archs:
arch = arch.replace('-', '_')
for p in self.fullpkglist:
regex_match = r"^%s-[^-]*-[^-]*@%s$" % \
(re.escape(pkg), re.escape(arch))
if re.match(regex_match, p) is not None:
# First found is best match
# bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
return pkg + '@' + arch
return ""
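    # Illustrative match (hypothetical feed contents): searching for 'bash'
    # with feed_archs ['core2-64'] scans fullpkglist entries such as
    # 'bash-4.2-r5@core2_64' and returns 'bash@core2_64' on the first hit,
    # or "" when nothing matches.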
'''
Translate the OE multilib format names to the RPM/Smart format names
    It searches the RPM/Smart format names in the probable multilib feeds first,
    and then searches the default base feed.
'''
def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
new_pkgs = list()
for pkg in pkgs:
new_pkg = pkg
# Search new_pkg in probable multilibs first
for mlib in self.ml_prefix_list:
                # Skip the default archs
if mlib == 'default':
continue
subst = pkg.replace(mlib + '-', '')
                # if the pkg is in this multilib feed
if subst != pkg:
feed_archs = self.ml_prefix_list[mlib]
new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
if not new_pkg:
# Failed to translate, package not found!
err_msg = '%s not found in the %s feeds (%s).\n' % \
(pkg, mlib, " ".join(feed_archs))
if not attempt_only:
err_msg += " ".join(self.fullpkglist)
bb.fatal(err_msg)
bb.warn(err_msg)
else:
new_pkgs.append(new_pkg)
break
# Apparently not a multilib package...
if pkg == new_pkg:
# Search new_pkg in default archs
default_archs = self.ml_prefix_list['default']
new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
if not new_pkg:
err_msg = '%s not found in the base feeds (%s).\n' % \
(pkg, ' '.join(default_archs))
if not attempt_only:
err_msg += " ".join(self.fullpkglist)
bb.fatal(err_msg)
bb.warn(err_msg)
else:
new_pkgs.append(new_pkg)
return new_pkgs
def _create_configs(self, platform, platform_extra):
# Setup base system configuration
bb.note("configuring RPM platform settings")
# Configure internal RPM environment when using Smart
os.environ['RPM_ETCRPM'] = self.etcrpm_dir
bb.utils.mkdirhier(self.etcrpm_dir)
# Setup temporary directory -- install...
if os.path.exists(self.install_dir):
bb.utils.remove(self.install_dir, True)
bb.utils.mkdirhier(os.path.join(self.install_dir, 'tmp'))
channel_priority = 5
platform_dir = os.path.join(self.etcrpm_dir, "platform")
with open(platform_dir, "w+") as platform_fd:
platform_fd.write(platform + '\n')
for pt in platform_extra:
channel_priority += 5
platform_fd.write(re.sub("-linux.*$", "-linux.*\n", pt))
# Tell RPM that the "/" directory exist and is available
bb.note("configuring RPM system provides")
sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
bb.utils.mkdirhier(sysinfo_dir)
with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
dirnames.write("/\n")
if self.providename:
providename_dir = os.path.join(sysinfo_dir, "Providename")
if not os.path.exists(providename_dir):
providename_content = '\n'.join(self.providename)
providename_content += '\n'
open(providename_dir, "w+").write(providename_content)
# Configure RPM... we enforce these settings!
bb.note("configuring RPM DB settings")
        # After changing the __db.* cache size, the log file will not be
        # generated automatically, which raises some warnings, so touch a
        # bare log for rpm to write into.
rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
if not os.path.exists(rpmlib_log):
bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
open(rpmlib_log, 'w+').close()
DB_CONFIG_CONTENT = "# ================ Environment\n" \
"set_data_dir .\n" \
"set_create_dir .\n" \
"set_lg_dir ./log\n" \
"set_tmp_dir ./tmp\n" \
"set_flags db_log_autoremove on\n" \
"\n" \
"# -- thread_count must be >= 8\n" \
"set_thread_count 64\n" \
"\n" \
"# ================ Logging\n" \
"\n" \
"# ================ Memory Pool\n" \
"set_cachesize 0 1048576 0\n" \
"set_mp_mmapsize 268435456\n" \
"\n" \
"# ================ Locking\n" \
"set_lk_max_locks 16384\n" \
"set_lk_max_lockers 16384\n" \
"set_lk_max_objects 16384\n" \
"mutex_set_max 163840\n" \
"\n" \
"# ================ Replication\n"
db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
if not os.path.exists(db_config_dir):
open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)
# Create database so that smart doesn't complain (lazy init)
cmd = "%s --root %s --dbpath /var/lib/rpm -qa > /dev/null" % (
self.rpm_cmd,
self.target_rootfs)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Create rpm database failed. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
# Configure smart
bb.note("configuring Smart settings")
bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
True)
self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
self._invoke_smart('config --set rpm-extra-macros._var=%s' %
self.d.getVar('localstatedir', True))
cmd = 'config --set rpm-extra-macros._tmppath=/install/tmp'
self._invoke_smart(cmd)
# Write common configuration for host and target usage
self._invoke_smart('config --set rpm-nolinktos=1')
self._invoke_smart('config --set rpm-noparentdirs=1')
for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
self._invoke_smart('flag --set ignore-recommends %s' % i)
# Do the following configurations here, to avoid them being
# saved for field upgrade
if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
self._invoke_smart('config --set ignore-all-recommends=1')
pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
for i in pkg_exclude.split():
self._invoke_smart('flag --set exclude-packages %s' % i)
# Optional debugging
# self._invoke_smart('config --set rpm-log-level=debug')
# cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
# self._invoke_smart(cmd)
ch_already_added = []
for canonical_arch in platform_extra:
arch = canonical_arch.split('-')[0]
arch_channel = os.path.join(self.deploy_dir, arch)
            if os.path.exists(arch_channel) and arch not in ch_already_added:
bb.note('Note: adding Smart channel %s (%s)' %
(arch, channel_priority))
self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
% (arch, arch_channel))
self._invoke_smart('channel --set %s priority=%d' %
(arch, channel_priority))
channel_priority -= 5
ch_already_added.append(arch)
bb.note('adding Smart RPM DB channel')
self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
# Construct install scriptlet wrapper.
        # Scripts need to be ordered when executed; this ensures numeric order.
        # If we ever run into needing more than 899 scripts, we'll have to
        # change num to start with 1000.
#
SCRIPTLET_FORMAT = "#!/bin/bash\n" \
"\n" \
"export PATH=%s\n" \
"export D=%s\n" \
'export OFFLINE_ROOT="$D"\n' \
'export IPKG_OFFLINE_ROOT="$D"\n' \
'export OPKG_OFFLINE_ROOT="$D"\n' \
"export INTERCEPT_DIR=%s\n" \
"export NATIVE_ROOT=%s\n" \
"\n" \
"$2 $1/$3 $4\n" \
"if [ $? -ne 0 ]; then\n" \
" if [ $4 -eq 1 ]; then\n" \
" mkdir -p $1/etc/rpm-postinsts\n" \
" num=100\n" \
" while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
" name=`head -1 $1/$3 | cut -d\' \' -f 2`\n" \
' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
" cat $1/$3 >> $1/etc/rpm-postinsts/${num}-${name}\n" \
" chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
" else\n" \
' echo "Error: pre/post remove scriptlet failed"\n' \
" fi\n" \
"fi\n"
intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
self.target_rootfs,
intercept_dir,
native_root)
open(self.scriptlet_wrapper, 'w+').write(scriptlet_content)
bb.note("Note: configuring RPM cross-install scriptlet_wrapper")
os.chmod(self.scriptlet_wrapper, 0755)
cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
self.scriptlet_wrapper
self._invoke_smart(cmd)
# Debug to show smart config info
# bb.note(self._invoke_smart('config --show'))
def update(self):
self._invoke_smart('update rpmsys')
'''
    Install pkgs with smart; the package names are in OE format
'''
def install(self, pkgs, attempt_only=False):
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
if attempt_only and len(pkgs) == 0:
return
pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
if not attempt_only:
bb.note('to be installed: %s' % ' '.join(pkgs))
cmd = "%s %s install -y %s" % \
(self.smart_cmd, self.smart_opt, ' '.join(pkgs))
bb.note(cmd)
else:
bb.note('installing attempt only packages...')
bb.note('Attempting %s' % ' '.join(pkgs))
cmd = "%s %s install --attempt -y %s" % \
(self.smart_cmd, self.smart_opt, ' '.join(pkgs))
try:
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
bb.note(output)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to install packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
'''
    Remove pkgs with smart; the package names are in smart/RPM format
'''
def remove(self, pkgs, with_dependencies=True):
bb.note('to be removed: ' + ' '.join(pkgs))
if not with_dependencies:
cmd = "%s -e --nodeps " % self.rpm_cmd
cmd += "--root=%s " % self.target_rootfs
cmd += "--dbpath=/var/lib/rpm "
cmd += "--define='_cross_scriptlet_wrapper %s' " % \
self.scriptlet_wrapper
cmd += "--define='_tmppath /install/tmp' %s" % ' '.join(pkgs)
else:
# for pkg in pkgs:
# bb.note('Debug: What required: %s' % pkg)
# bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))
cmd = "%s %s remove -y %s" % (self.smart_cmd,
self.smart_opt,
' '.join(pkgs))
try:
bb.note(cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
bb.note(output)
except subprocess.CalledProcessError as e:
bb.note("Unable to remove packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
def upgrade(self):
bb.note('smart upgrade')
self._invoke_smart('upgrade')
def write_index(self):
result = self.indexer.write_index()
if result is not None:
bb.fatal(result)
def remove_packaging_data(self):
bb.utils.remove(self.image_rpmlib, True)
bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
True)
bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)
# remove temp directory
bb.utils.remove(self.d.expand('${IMAGE_ROOTFS}/install'), True)
def backup_packaging_data(self):
        # Save the rpmlib for incremental rpm image generation
if os.path.exists(self.saved_rpmlib):
bb.utils.remove(self.saved_rpmlib, True)
shutil.copytree(self.image_rpmlib,
self.saved_rpmlib,
symlinks=True)
def recovery_packaging_data(self):
# Move the rpmlib back
if os.path.exists(self.saved_rpmlib):
if os.path.exists(self.image_rpmlib):
bb.utils.remove(self.image_rpmlib, True)
            bb.note('Recovering packaging data')
shutil.copytree(self.saved_rpmlib,
self.image_rpmlib,
symlinks=True)
def list_installed(self, format=None):
return self.pkgs_list.list(format)
'''
    For an incremental install, we need to determine what we already have,
    what we need to add, and what to remove.
    dump_install_solution dumps and saves the new install solution.
'''
def dump_install_solution(self, pkgs):
bb.note('creating new install solution for incremental install')
if len(pkgs) == 0:
return
pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
install_pkgs = list()
cmd = "%s %s install -y --dump %s 2>%s" % \
(self.smart_cmd,
self.smart_opt,
' '.join(pkgs),
self.solution_manifest)
try:
# Disable rpmsys channel for the fake install
self._invoke_smart('channel --disable rpmsys')
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
with open(self.solution_manifest, 'r') as manifest:
for pkg in manifest.read().split('\n'):
if '@' in pkg:
install_pkgs.append(pkg)
except subprocess.CalledProcessError as e:
bb.note("Unable to dump install packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
        # Re-enable the rpmsys channel
self._invoke_smart('channel --enable rpmsys')
return install_pkgs
'''
    For an incremental install, we need to determine what we already have,
    what we need to add, and what to remove.
    load_old_install_solution loads the previous install solution.
'''
def load_old_install_solution(self):
bb.note('load old install solution for incremental install')
installed_pkgs = list()
if not os.path.exists(self.solution_manifest):
            bb.note('old install solution does not exist')
return installed_pkgs
with open(self.solution_manifest, 'r') as manifest:
for pkg in manifest.read().split('\n'):
if '@' in pkg:
installed_pkgs.append(pkg.strip())
return installed_pkgs
'''
    Dump all available packages in the feeds; it should be invoked after the
    newest rpm index has been created.
'''
def dump_all_available_pkgs(self):
available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
available_pkgs = list()
cmd = "%s %s query --output %s" % \
(self.smart_cmd, self.smart_opt, available_manifest)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
with open(available_manifest, 'r') as manifest:
for pkg in manifest.read().split('\n'):
if '@' in pkg:
available_pkgs.append(pkg.strip())
except subprocess.CalledProcessError as e:
bb.note("Unable to list all available packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
self.fullpkglist = available_pkgs
return
def save_rpmpostinst(self, pkg):
mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
new_pkg = pkg
# Remove any multilib prefix from the package name
for mlib in mlibs:
if mlib in pkg:
new_pkg = pkg.replace(mlib + '-', '')
break
bb.note(' * postponing %s' % new_pkg)
saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg
cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
try:
bb.note(cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
bb.note(output)
os.chmod(saved_dir, 0755)
except subprocess.CalledProcessError as e:
bb.fatal("Invoke save_rpmpostinst failed. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
'''Write common configuration for target usage'''
def rpm_setup_smart_target_config(self):
bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
True)
self._invoke_smart('config --set rpm-nolinktos=1')
self._invoke_smart('config --set rpm-noparentdirs=1')
for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
self._invoke_smart('flag --set ignore-recommends %s' % i)
self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
'''
    The rpm db lock files are produced after invoking rpm to query the
    build system, and they prevent rpm on the target from working, so we
    need to unlock the rpm db by removing the lock files.
'''
def unlock_rpm_db(self):
# Remove rpm db lock files
rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
for f in rpm_db_locks:
bb.utils.remove(f, True)
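# A minimal driving sketch for RpmPM (illustrative; assumes a datastore 'd'
# and the usual image variables, roughly what the rootfs code does;
# 'packagegroup-core-boot' is just an example package):
#
#   pm = RpmPM(d, d.getVar('IMAGE_ROOTFS', True),
#              d.getVar('TARGET_VENDOR', True))
#   pm.create_configs()
#   pm.write_index()
#   pm.dump_all_available_pkgs()
#   pm.update()
#   pm.install(['packagegroup-core-boot'])
#   pm.install_complementary()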
class OpkgPM(PackageManager):
def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
super(OpkgPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.config_file = config_file
self.pkg_archs = archs
self.task_name = task_name
self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
self.opkg_args += self.d.getVar("OPKG_ARGS", True)
opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
if opkg_lib_dir[0] == "/":
opkg_lib_dir = opkg_lib_dir[1:]
self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
bb.utils.mkdirhier(self.opkg_dir)
self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
self._create_config()
else:
self._create_custom_config()
self.indexer = OpkgIndexer(self.d, self.deploy_dir)
"""
    This function will change a package's status in the /var/lib/opkg/status file.
    If 'packages' is None then the status_tag will be applied to all
    packages.
"""
def mark_packages(self, status_tag, packages=None):
status_file = os.path.join(self.opkg_dir, "status")
with open(status_file, "r") as sf:
with open(status_file + ".tmp", "w+") as tmp_sf:
if packages is None:
tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
r"Package: \1\n\2Status: \3%s" % status_tag,
sf.read()))
else:
                    if not isinstance(packages, list):
                        raise TypeError("'packages' should be a list object")
status = sf.read()
for pkg in packages:
status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
status)
tmp_sf.write(status)
os.rename(status_file + ".tmp", status_file)
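    # Illustrative effect of mark_packages (status_tag="installed",
    # hypothetical stanza): the trailing state word is rewritten, so
    #   "Status: install user unpacked"
    # becomes
    #   "Status: install user installed"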
def _create_custom_config(self):
bb.note("Building from feeds activated!")
with open(self.config_file, "w+") as config_file:
priority = 1
for arch in self.pkg_archs.split():
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
if feed_match is not None:
feed_name = feed_match.group(1)
feed_uri = feed_match.group(2)
bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
"""
    Allow using package deploy directory contents as a quick devel-testing
feed. This creates individual feed configs for each arch subdir of those
specified as compatible for the current machine.
NOTE: Development-helper feature, NOT a full-fledged feed.
"""
if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
for arch in self.pkg_archs.split():
cfg_file_name = os.path.join(self.target_rootfs,
self.d.getVar("sysconfdir", True),
"opkg",
"local-%s-feed.conf" % arch)
with open(cfg_file_name, "w+") as cfg_file:
                    cfg_file.write("src/gz local-%s %s/%s" %
                                   (arch,
                                    self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
                                    arch))
def _create_config(self):
with open(self.config_file, "w+") as config_file:
priority = 1
for arch in self.pkg_archs.split():
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
config_file.write("src oe file:%s\n" % self.deploy_dir)
for arch in self.pkg_archs.split():
pkgs_dir = os.path.join(self.deploy_dir, arch)
if os.path.isdir(pkgs_dir):
config_file.write("src oe-%s file:%s\n" %
(arch, pkgs_dir))
def insert_feeds_uris(self):
if self.feed_uris == "":
return
rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
% self.target_rootfs)
with open(rootfs_config, "w+") as config_file:
uri_iterator = 0
for uri in self.feed_uris.split():
config_file.write("src/gz url-%d %s/ipk\n" %
(uri_iterator, uri))
for arch in self.pkg_archs.split():
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
                    bb.note('Note: adding opkg channel uri-%s-%d (%s)' %
(arch, uri_iterator, uri))
config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
(arch, uri_iterator, uri, arch))
uri_iterator += 1
def update(self):
self.deploy_dir_lock()
cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.deploy_dir_unlock()
bb.fatal("Unable to update the package index files. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
self.deploy_dir_unlock()
def install(self, pkgs, attempt_only=False):
if attempt_only and len(pkgs) == 0:
return
cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
"intercept_scripts")
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
bb.note(output)
except subprocess.CalledProcessError as e:
(bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output))
def remove(self, pkgs, with_dependencies=True):
if with_dependencies:
cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
(self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
else:
cmd = "%s %s --force-depends remove %s" % \
(self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
try:
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
bb.note(output)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to remove packages. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output))
def write_index(self):
self.deploy_dir_lock()
result = self.indexer.write_index()
self.deploy_dir_unlock()
if result is not None:
bb.fatal(result)
def remove_packaging_data(self):
bb.utils.remove(self.opkg_dir, True)
        # recreate the directory; it's needed by the PM lock
bb.utils.mkdirhier(self.opkg_dir)
def list_installed(self, format=None):
return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)
def handle_bad_recommendations(self):
bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
if bad_recommendations.strip() == "":
return
status_file = os.path.join(self.opkg_dir, "status")
        # If the status file exists, the bad recommendations have already
        # been handled
if os.path.exists(status_file):
return
cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
with open(status_file, "w+") as status:
for pkg in bad_recommendations.split():
pkg_info = cmd + pkg
try:
output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get package info. Command '%s' "
"returned %d:\n%s" % (pkg_info, e.returncode, e.output))
if output == "":
bb.note("Ignored bad recommendation: '%s' is "
"not a package" % pkg)
continue
for line in output.split('\n'):
if line.startswith("Status:"):
status.write("Status: deinstall hold not-installed\n")
else:
status.write(line + "\n")
'''
    The following function performs a dummy install of pkgs and returns the
    output log.
'''
def dummy_install(self, pkgs):
if len(pkgs) == 0:
return
        # Create a temp dir as the opkg root for the dummy installation
temp_rootfs = self.d.expand('${T}/opkg')
temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
bb.utils.mkdirhier(temp_opkg_dir)
opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
opkg_args += self.d.getVar("OPKG_ARGS", True)
cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to update. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
# Dummy installation
cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
opkg_args,
' '.join(pkgs))
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to dummy install packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
bb.utils.remove(temp_rootfs, True)
return output
def backup_packaging_data(self):
        # Save the opkglib for incremental ipk image generation
if os.path.exists(self.saved_opkg_dir):
bb.utils.remove(self.saved_opkg_dir, True)
shutil.copytree(self.opkg_dir,
self.saved_opkg_dir,
symlinks=True)
def recover_packaging_data(self):
# Move the opkglib back
if os.path.exists(self.saved_opkg_dir):
if os.path.exists(self.opkg_dir):
bb.utils.remove(self.opkg_dir, True)
            bb.note('Recovering packaging data')
shutil.copytree(self.saved_opkg_dir,
self.opkg_dir,
symlinks=True)
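# A minimal driving sketch for OpkgPM (illustrative; assumes a datastore 'd'
# with the usual ipk rootfs variables; the package name is just an example):
#
#   pm = OpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
#               d.getVar('IPKGCONF_TARGET', True),
#               d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True))
#   pm.write_index()
#   pm.update()
#   pm.install(['packagegroup-core-boot'])
#   pm.handle_bad_recommendations()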
class DpkgPM(PackageManager):
def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
super(DpkgPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
if apt_conf_dir is None:
self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
else:
self.apt_conf_dir = apt_conf_dir
self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
self.apt_args = d.getVar("APT_ARGS", True)
self._create_configs(archs, base_archs)
self.indexer = DpkgIndexer(self.d, self.deploy_dir)
"""
    This function will change a package's status in the /var/lib/dpkg/status file.
    If 'packages' is None then the status_tag will be applied to all
    packages.
"""
def mark_packages(self, status_tag, packages=None):
status_file = self.target_rootfs + "/var/lib/dpkg/status"
with open(status_file, "r") as sf:
with open(status_file + ".tmp", "w+") as tmp_sf:
if packages is None:
tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
r"Package: \1\n\2Status: \3%s" % status_tag,
sf.read()))
else:
                    if not isinstance(packages, list):
                        raise TypeError("'packages' should be a list object")
status = sf.read()
for pkg in packages:
status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
status)
tmp_sf.write(status)
os.rename(status_file + ".tmp", status_file)
"""
Run the pre/post installs for package "package_name". If package_name is
None, then run all pre/post install scriptlets.
"""
def run_pre_post_installs(self, package_name=None):
info_dir = self.target_rootfs + "/var/lib/dpkg/info"
suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
status_file = self.target_rootfs + "/var/lib/dpkg/status"
installed_pkgs = []
with open(status_file, "r") as status:
for line in status.read().split('\n'):
m = re.match("^Package: (.*)", line)
if m is not None:
installed_pkgs.append(m.group(1))
        if package_name is not None and package_name not in installed_pkgs:
return
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
"intercept_scripts")
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
failed_pkgs = []
for pkg_name in installed_pkgs:
for suffix in suffixes:
p_full = os.path.join(info_dir, pkg_name + suffix[0])
if os.path.exists(p_full):
try:
bb.note("Executing %s for package: %s ..." %
(suffix[1].lower(), pkg_name))
subprocess.check_output(p_full, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.note("%s for package %s failed with %d:\n%s" %
(suffix[1], pkg_name, e.returncode, e.output))
failed_pkgs.append(pkg_name)
break
if len(failed_pkgs):
self.mark_packages("unpacked", failed_pkgs)
def update(self):
os.environ['APT_CONFIG'] = self.apt_conf_file
self.deploy_dir_lock()
cmd = "%s update" % self.apt_get_cmd
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to update the package index files. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output))
self.deploy_dir_unlock()
def install(self, pkgs, attempt_only=False):
if attempt_only and len(pkgs) == 0:
return
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
(self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
(bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output))
# rename *.dpkg-new files/dirs
for root, dirs, files in os.walk(self.target_rootfs):
for dir in dirs:
new_dir = re.sub("\.dpkg-new", "", dir)
if dir != new_dir:
os.rename(os.path.join(root, dir),
os.path.join(root, new_dir))
for file in files:
new_file = re.sub("\.dpkg-new", "", file)
if file != new_file:
os.rename(os.path.join(root, file),
os.path.join(root, new_file))
def remove(self, pkgs, with_dependencies=True):
if with_dependencies:
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s remove %s" % (self.apt_get_cmd, ' '.join(pkgs))
else:
cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
" -r --force-depends %s" % \
(bb.utils.which(os.getenv('PATH'), "dpkg"),
self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to remove packages. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output))
def write_index(self):
self.deploy_dir_lock()
result = self.indexer.write_index()
self.deploy_dir_unlock()
if result is not None:
bb.fatal(result)
def insert_feeds_uris(self):
if self.feed_uris == "":
return
sources_conf = os.path.join("%s/etc/apt/sources.list"
% self.target_rootfs)
arch_list = []
archs = self.d.getVar('PACKAGE_ARCHS', True)
for arch in archs.split():
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
with open(sources_conf, "w+") as sources_file:
for uri in self.feed_uris.split():
for arch in arch_list:
bb.note('Note: adding dpkg channel at (%s)' % uri)
sources_file.write("deb %s/deb/%s ./\n" %
(uri, arch))
def _create_configs(self, archs, base_archs):
base_archs = re.sub("_", "-", base_archs)
if os.path.exists(self.apt_conf_dir):
bb.utils.remove(self.apt_conf_dir, True)
bb.utils.mkdirhier(self.apt_conf_dir)
bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
arch_list = []
for arch in archs.split():
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
priority = 801
for arch in arch_list:
prefs_file.write(
"Package: *\n"
"Pin: release l=%s\n"
"Pin-Priority: %d\n\n" % (arch, priority))
priority += 5
pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
            for pkg in pkg_exclude.split():
prefs_file.write(
"Package: %s\n"
"Pin: release *\n"
"Pin-Priority: -1\n\n" % pkg)
arch_list.reverse()
with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
for arch in arch_list:
sources_file.write("deb file:%s/ ./\n" %
os.path.join(self.deploy_dir, arch))
with open(self.apt_conf_file, "w+") as apt_conf:
with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
line = re.sub("Architecture \".*\";",
"Architecture \"%s\";" % base_archs, line)
line = re.sub("#ROOTFS#", self.target_rootfs, line)
line = re.sub("#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
open(os.path.join(target_dpkg_dir, "status"), "w+").close()
if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
open(os.path.join(target_dpkg_dir, "available"), "w+").close()
def remove_packaging_data(self):
bb.utils.remove(os.path.join(self.target_rootfs,
self.d.getVar('opkglibdir', True)), True)
bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
def fix_broken_dependencies(self):
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Cannot fix broken dependencies. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
def list_installed(self, format=None):
return DpkgPkgsList(self.d, self.target_rootfs).list()
def generate_index_files(d):
classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()
indexer_map = {
"rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
"ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
"deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
}
result = None
for pkg_class in classes:
        if pkg_class not in indexer_map:
continue
if os.path.exists(indexer_map[pkg_class][1]):
result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
if result is not None:
bb.fatal(result)
if __name__ == "__main__":
"""
    We should be able to run this as a standalone script, from outside the
    bitbake environment.
"""
"""
TBD
"""
########NEW FILE########
__FILENAME__ = patch
import oe.path
class NotFoundError(bb.BBHandledException):
def __init__(self, path):
self.path = path
def __str__(self):
return "Error: %s not found." % self.path
class CmdError(bb.BBHandledException):
def __init__(self, exitstatus, output):
self.status = exitstatus
self.output = output
def __str__(self):
return "Command Error: exit status: %d Output:\n%s" % (self.status, self.output)
def runcmd(args, dir = None):
import pipes
if dir:
olddir = os.path.abspath(os.curdir)
if not os.path.exists(dir):
raise NotFoundError(dir)
os.chdir(dir)
# print("cwd: %s -> %s" % (olddir, dir))
try:
args = [ pipes.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
(exitstatus, output) = oe.utils.getstatusoutput(cmd)
if exitstatus != 0:
raise CmdError(exitstatus >> 8, output)
return output
finally:
if dir:
os.chdir(olddir)
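# A usage sketch for runcmd (illustrative; the paths are hypothetical):
# arguments are shell-quoted, joined into one command, and executed in
# 'dir' when given; a non-zero exit raises CmdError.
#
#   try:
#       out = runcmd(["git", "apply", "--check", "fix.patch"], "/path/to/src")
#   except CmdError as err:
#       bb.note("patch check failed: %s" % err.output)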
class PatchError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "Patch Error: %s" % self.msg
class PatchSet(object):
defaults = {
"strippath": 1
}
def __init__(self, dir, d):
self.dir = dir
self.d = d
self.patches = []
self._current = None
def current(self):
return self._current
def Clean(self):
"""
Clean out the patch set. Generally includes unapplying all
patches and wiping out all associated metadata.
"""
raise NotImplementedError()
def Import(self, patch, force):
if not patch.get("file"):
if not patch.get("remote"):
raise PatchError("Patch file must be specified in patch import.")
else:
patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
for param in PatchSet.defaults:
if not patch.get(param):
patch[param] = PatchSet.defaults[param]
if patch.get("remote"):
patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)
patch["filemd5"] = bb.utils.md5_file(patch["file"])
def Push(self, force):
raise NotImplementedError()
def Pop(self, force):
raise NotImplementedError()
def Refresh(self, remote = None, all = None):
raise NotImplementedError()
class PatchTree(PatchSet):
def __init__(self, dir, d):
PatchSet.__init__(self, dir, d)
self.patchdir = os.path.join(self.dir, 'patches')
self.seriespath = os.path.join(self.dir, 'patches', 'series')
bb.utils.mkdirhier(self.patchdir)
def _appendPatchFile(self, patch, strippath):
with open(self.seriespath, 'a') as f:
f.write(os.path.basename(patch) + "," + strippath + "\n")
shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
def _removePatch(self, p):
patch = {}
patch['file'] = p.split(",")[0]
patch['strippath'] = p.split(",")[1]
self._applypatch(patch, False, True)
def _removePatchFile(self, all = False):
if not os.path.exists(self.seriespath):
return
patches = open(self.seriespath, 'r+').readlines()
if all:
for p in reversed(patches):
self._removePatch(os.path.join(self.patchdir, p.strip()))
patches = []
else:
self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
patches.pop()
with open(self.seriespath, 'w') as f:
for p in patches:
f.write(p)
def Import(self, patch, force = None):
""""""
PatchSet.Import(self, patch, force)
if self._current is not None:
i = self._current + 1
else:
i = 0
self.patches.insert(i, patch)
def _applypatch(self, patch, force = False, reverse = False, run = True):
shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
if reverse:
shellcmd.append('-R')
if not run:
            return "sh -c " + " ".join(shellcmd)
if not force:
shellcmd.append('--dry-run')
output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
if force:
return
        shellcmd.pop()  # drop the '--dry-run' flag and apply for real
output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
if not reverse:
self._appendPatchFile(patch['file'], patch['strippath'])
return output
def Push(self, force = False, all = False, run = True):
bb.note("self._current is %s" % self._current)
bb.note("patches is %s" % self.patches)
if all:
for i in self.patches:
bb.note("applying patch %s" % i)
self._applypatch(i, force)
self._current = i
else:
if self._current is not None:
next = self._current + 1
else:
next = 0
bb.note("applying patch %s" % self.patches[next])
ret = self._applypatch(self.patches[next], force)
self._current = next
return ret
def Pop(self, force = None, all = None):
if all:
self._removePatchFile(True)
self._current = None
else:
self._removePatchFile(False)
if self._current == 0:
self._current = None
if self._current is not None:
self._current = self._current - 1
def Clean(self):
""""""
self.Pop(all=True)
class GitApplyTree(PatchTree):
def __init__(self, dir, d):
PatchTree.__init__(self, dir, d)
def _applypatch(self, patch, force = False, reverse = False, run = True):
def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True):
if reverse:
shellcmd.append('-R')
shellcmd.append(patch['file'])
if not run:
                return "sh -c " + " ".join(shellcmd)
return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
try:
shellcmd = ["git", "--work-tree=.", "am", "-3", "-p%s" % patch['strippath']]
return _applypatchhelper(shellcmd, patch, force, reverse, run)
except CmdError:
shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']]
return _applypatchhelper(shellcmd, patch, force, reverse, run)
class QuiltTree(PatchSet):
def _runcmd(self, args, run = True):
quiltrc = self.d.getVar('QUILTRCFILE', True)
if not run:
return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)
def _quiltpatchpath(self, file):
return os.path.join(self.dir, "patches", os.path.basename(file))
def __init__(self, dir, d):
PatchSet.__init__(self, dir, d)
self.initialized = False
p = os.path.join(self.dir, 'patches')
if not os.path.exists(p):
os.makedirs(p)
def Clean(self):
try:
self._runcmd(["pop", "-a", "-f"])
oe.path.remove(os.path.join(self.dir, "patches","series"))
except Exception:
pass
self.initialized = True
def InitFromDir(self):
# read series -> self.patches
seriespath = os.path.join(self.dir, 'patches', 'series')
if not os.path.exists(self.dir):
raise NotFoundError(self.dir)
if os.path.exists(seriespath):
            series = open(seriespath, 'r')
for line in series.readlines():
patch = {}
parts = line.strip().split()
patch["quiltfile"] = self._quiltpatchpath(parts[0])
patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
if len(parts) > 1:
patch["strippath"] = parts[1][2:]
self.patches.append(patch)
series.close()
# determine which patches are applied -> self._current
try:
output = runcmd(["quilt", "applied"], self.dir)
        except CmdError as err:
            if err.output.strip() == "No patches applied":
                return
            else:
                raise
output = [val for val in output.split('\n') if not val.startswith('#')]
for patch in self.patches:
if os.path.basename(patch["quiltfile"]) == output[-1]:
self._current = self.patches.index(patch)
self.initialized = True
def Import(self, patch, force = None):
if not self.initialized:
self.InitFromDir()
PatchSet.Import(self, patch, force)
oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
f = open(os.path.join(self.dir, "patches", "series"), "a")
f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"]+"\n")
f.close()
patch["quiltfile"] = self._quiltpatchpath(patch["file"])
patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
# TODO: determine if the file being imported:
# 1) is already imported, and is the same
# 2) is already imported, but differs
self.patches.insert(self._current or 0, patch)
def Push(self, force = False, all = False, run = True):
# quilt push [-f]
args = ["push"]
if force:
args.append("-f")
if all:
args.append("-a")
if not run:
return self._runcmd(args, run)
self._runcmd(args)
if self._current is not None:
self._current = self._current + 1
else:
self._current = 0
def Pop(self, force = None, all = None):
# quilt pop [-f]
args = ["pop"]
if force:
args.append("-f")
if all:
args.append("-a")
self._runcmd(args)
if self._current == 0:
self._current = None
if self._current is not None:
self._current = self._current - 1
def Refresh(self, **kwargs):
if kwargs.get("remote"):
patch = self.patches[kwargs["patch"]]
if not patch:
raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
if type == "file":
import shutil
if not patch.get("file") and patch.get("remote"):
patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
shutil.copyfile(patch["quiltfile"], patch["file"])
else:
raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
else:
# quilt refresh
args = ["refresh"]
if kwargs.get("quiltfile"):
args.append(os.path.basename(kwargs["quiltfile"]))
elif kwargs.get("patch"):
args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
self._runcmd(args)
class Resolver(object):
def __init__(self, patchset, terminal):
raise NotImplementedError()
def Resolve(self):
raise NotImplementedError()
def Revert(self):
raise NotImplementedError()
def Finalize(self):
raise NotImplementedError()
class NOOPResolver(Resolver):
def __init__(self, patchset, terminal):
self.patchset = patchset
self.terminal = terminal
def Resolve(self):
olddir = os.path.abspath(os.curdir)
os.chdir(self.patchset.dir)
try:
self.patchset.Push()
except Exception:
os.chdir(olddir)
raise
os.chdir(olddir)
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
# files (the urls).
class UserResolver(Resolver):
def __init__(self, patchset, terminal):
self.patchset = patchset
self.terminal = terminal
# Force a push in the patchset, then drop to a shell for the user to
# resolve any rejected hunks
def Resolve(self):
olddir = os.path.abspath(os.curdir)
os.chdir(self.patchset.dir)
try:
self.patchset.Push(False)
except CmdError as v:
# Patch application failed
patchcmd = self.patchset.Push(True, False, False)
t = self.patchset.d.getVar('T', True)
if not t:
bb.msg.fatal("Build", "T not set")
bb.utils.mkdirhier(t)
import random
rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
f = open(rcfile, "w")
f.write("echo '*** Manual patch resolution mode ***'\n")
f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
f.write("echo ''\n")
f.write(" ".join(patchcmd) + "\n")
f.close()
os.chmod(rcfile, 0775)
self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)
# Construct a new PatchSet after the user's changes, compare the
# sets, checking patches for modifications, and doing a remote
# refresh on each.
oldpatchset = self.patchset
self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
for patch in self.patchset.patches:
oldpatch = None
for opatch in oldpatchset.patches:
if opatch["quiltfile"] == patch["quiltfile"]:
oldpatch = opatch
if oldpatch:
patch["remote"] = oldpatch["remote"]
if patch["quiltfile"] == oldpatch["quiltfile"]:
if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
# the user changed this patch; refresh its remote copy
self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
else:
# User did not fix the problem. Abort.
raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
except Exception:
os.chdir(olddir)
raise
os.chdir(olddir)
########NEW FILE########
__FILENAME__ = path
import errno
import glob
import shutil
import subprocess
import os.path
def join(*paths):
"""Like os.path.join but doesn't treat absolute RHS specially"""
return os.path.normpath("/".join(paths))
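# Quick contrast with the stdlib (toy paths): join("/a", "/b") yields
# "/a/b", whereas os.path.join("/a", "/b") would discard the left-hand
# side and return "/b".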
def relative(src, dest):
""" Return a relative path from src to dest.
>>> relative("/usr/bin", "/tmp/foo/bar")
../../tmp/foo/bar
>>> relative("/usr/bin", "/usr/lib")
../lib
>>> relative("/tmp", "/tmp/foo/bar")
foo/bar
"""
return os.path.relpath(dest, src)
def make_relative_symlink(path):
""" Convert an absolute symlink to a relative one """
if not os.path.islink(path):
return
link = os.readlink(path)
if not os.path.isabs(link):
return
# find the common ancestor directory
ancestor = path
depth = 0
while ancestor and not link.startswith(ancestor):
ancestor = ancestor.rpartition('/')[0]
depth += 1
if not ancestor:
print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
return
base = link.partition(ancestor)[2].strip('/')
while depth > 1:
base = "../" + base
depth -= 1
os.remove(path)
os.symlink(base, path)
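# Worked sketch (the paths are hypothetical): for a link
#   /sysroot/usr/bin/foo -> /sysroot/usr/lib/foo
# the common ancestor is /sysroot/usr (depth 2), so the link is
# rewritten in place as: foo -> ../lib/foo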
def format_display(path, metadata):
""" Prepare a path for display to the user. """
rel = relative(metadata.getVar("TOPDIR", True), path)
if len(rel) > len(path):
return path
else:
return rel
def copytree(src, dst):
# We could use something like shutil.copytree here, but it turns out to
# be slow: it takes twice as long copying to an empty directory, and if
# dst already has contents it can be 15 times slower.
# This way we also preserve hardlinks between files in the tree.
bb.utils.mkdirhier(dst)
cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
""" Make the hard link when possible, otherwise copy. """
bb.utils.mkdirhier(dst)
if os.path.isdir(src) and not len(os.listdir(src)):
return
if (os.stat(src).st_dev == os.stat(dst).st_dev):
# Need to copy directories only with tar first since cp will error if two
# writers try and create a directory at the same time
cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst)
check_output(cmd, shell=True, stderr=subprocess.STDOUT)
cmd = 'cd %s; find . -print0 | cpio --null -pdlu %s' % (src, dst)
check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
copytree(src, dst)
def remove(path, recurse=True):
"""Equivalent to rm -f or rm -rf"""
for name in glob.glob(path):
try:
os.unlink(name)
except OSError as exc:
if recurse and exc.errno == errno.EISDIR:
shutil.rmtree(name)
elif exc.errno != errno.ENOENT:
raise
def symlink(source, destination, force=False):
"""Create a symbolic link"""
try:
if force:
remove(destination)
os.symlink(source, destination)
except OSError as e:
if e.errno != errno.EEXIST or os.readlink(destination) != source:
raise
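# Usage sketch (the paths are hypothetical):
#   symlink("image-1.0.ext4", "/deploy/image.ext4", force=True)
# replaces any existing link; without force, pointing an existing link
# at a different target raises OSError (EEXIST).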
class CalledProcessError(Exception):
def __init__(self, retcode, cmd, output = None):
self.retcode = retcode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d with output %s" % (self.cmd, self.retcode, self.output)
# Not needed when we move to python 2.7
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the retcode
attribute and any output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=subprocess.STDOUT.
>>> check_output(["/bin/sh", "-c",
...               "ls -l non_existent_file ; exit 0"],
...              stderr=subprocess.STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def find(dir, **walkoptions):
""" Given a directory, recurses into that directory,
returning all files as absolute paths. """
for root, dirs, files in os.walk(dir, **walkoptions):
for file in files:
yield os.path.join(root, file)
## realpath() related functions
def __is_path_below(file, root):
return (file + os.path.sep).startswith(root)
def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
"""Calculates real path of symlink 'start' + 'rel_path' below
'root'; no part of 'start' below 'root' must contain symlinks. """
have_dir = True
for d in rel_path.split(os.path.sep):
if not have_dir and not assume_dir:
raise OSError(errno.ENOENT, "no such directory %s" % start)
if d == os.path.pardir: # '..'
if len(start) >= len(root):
# do not follow '..' before root
start = os.path.dirname(start)
else:
# emit warning?
pass
else:
(start, have_dir) = __realpath(os.path.join(start, d),
root, loop_cnt, assume_dir)
assert(__is_path_below(start, root))
return start
def __realpath(file, root, loop_cnt, assume_dir):
while os.path.islink(file) and len(file) >= len(root):
if loop_cnt == 0:
raise OSError(errno.ELOOP, file)
loop_cnt -= 1
target = os.path.normpath(os.readlink(file))
if not os.path.isabs(target):
tdir = os.path.dirname(file)
assert(__is_path_below(tdir, root))
else:
tdir = root
file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)
try:
is_dir = os.path.isdir(file)
except:
is_dir = False
return (file, is_dir)
def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
""" Returns the canonical path of 'file' with assuming a
toplevel 'root' directory. When 'use_physdir' is set, all
preceding path components of 'file' will be resolved first;
this flag should be set unless it is guaranteed that there is
no symlink in the path. When 'assume_dir' is not set, missing
path components will raise an ENOENT error"""
root = os.path.normpath(root)
file = os.path.normpath(file)
if not root.endswith(os.path.sep):
# letting root end with '/' makes some things easier
root = root + os.path.sep
if not __is_path_below(file, root):
raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
try:
if use_physdir:
file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
else:
file = __realpath(file, root, loop_cnt, assume_dir)[0]
except OSError as e:
if e.errno == errno.ELOOP:
# make ELOOP more readable; without catching it here, a backtrace
# with hundreds of OSError exceptions would be printed
raise OSError(errno.ELOOP,
"too many levels of recursion while resolving '%s'; loop in '%s'" %
(file, e.strerror))
raise
return file
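# Usage sketch (the sysroot layout is hypothetical): with
#   /sysroot/usr/bin/sh -> /bin/busybox
# an absolute link target is re-rooted inside the sysroot, so
#   realpath("/sysroot/usr/bin/sh", "/sysroot")
# would return "/sysroot/bin/busybox".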
########NEW FILE########
__FILENAME__ = prservice
def prserv_make_conn(d, check = False):
import prserv.serv
host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
try:
conn = None
conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
if check:
if not conn.ping():
raise Exception('service not available')
d.setVar("__PRSERV_CONN",conn)
except Exception, exc:
bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
return conn
def prserv_dump_db(d):
if not d.getVar('PRSERV_HOST', True):
bb.error("Not using network based PR service")
return None
conn = d.getVar("__PRSERV_CONN", True)
if conn is None:
conn = prserv_make_conn(d)
if conn is None:
bb.error("Making connection failed to remote PR service")
return None
#dump db
opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
if not d.getVar('PRSERV_HOST', True):
bb.error("Not using network based PR service")
return None
conn = d.getVar("__PRSERV_CONN", True)
if conn is None:
conn = prserv_make_conn(d)
if conn is None:
bb.error("Making connection failed to remote PR service")
return None
#get the entry values
imported = []
prefix = "PRAUTO$"
for v in d.keys():
if v.startswith(prefix):
(remain, sep, checksum) = v.rpartition('$')
(remain, sep, pkgarch) = remain.rpartition('$')
(remain, sep, version) = remain.rpartition('$')
if (remain + '$' != prefix) or \
(filter_version and filter_version != version) or \
(filter_pkgarch and filter_pkgarch != pkgarch) or \
(filter_checksum and filter_checksum != checksum):
continue
try:
value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
except BaseException as exc:
bb.debug("Not valid value of %s:%s" % (v,str(exc)))
continue
ret = conn.importone(version,pkgarch,checksum,value)
if ret != value:
bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
else:
imported.append((version,pkgarch,checksum,value))
return imported
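# The datastore keys consumed above have this shape (values hypothetical):
#   PRAUTO$2.4$armv5te$deadbeefcafe = "7"
# i.e. "PRAUTO" plus '$'-separated version, package architecture and
# checksum, with the integer PR value to import as the value.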
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
import bb.utils
# initialize the output file
bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
df = d.getVar('PRSERV_DUMPFILE', True)
#write data
lf = bb.utils.lockfile("%s.lock" % df)
f = open(df, "a")
if metainfo:
#dump column info
f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
f.write("#Table: %s\n" % metainfo['tbl_name'])
f.write("#Columns:\n")
f.write("#name \t type \t notn \t dflt \t pk\n")
f.write("#----------\t --------\t --------\t --------\t ----\n")
for i in range(len(metainfo['col_info'])):
f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
(metainfo['col_info'][i]['name'],
metainfo['col_info'][i]['type'],
metainfo['col_info'][i]['notnull'],
metainfo['col_info'][i]['dflt_value'],
metainfo['col_info'][i]['pk']))
f.write("\n")
if lockdown:
f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
if datainfo:
idx = {}
for i in range(len(datainfo)):
pkgarch = datainfo[i]['pkgarch']
value = datainfo[i]['value']
if pkgarch not in idx:
idx[pkgarch] = i
elif value > datainfo[idx[pkgarch]]['value']:
idx[pkgarch] = i
f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
(str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
if not nomax:
for i in idx:
f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
f.close()
bb.utils.unlockfile(lf)
def prserv_check_avail(d):
host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
try:
if len(host_params) != 2:
raise TypeError
else:
int(host_params[1])
except TypeError:
bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
else:
prserv_make_conn(d, True)
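# Usage sketch: PRSERV_HOST is a "host:port" pair, e.g. (hypothetical):
#   PRSERV_HOST = "localhost:8585"
# An empty or malformed value makes prserv_check_avail() abort the build.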
########NEW FILE########
__FILENAME__ = qa
class ELFFile:
EI_NIDENT = 16
EI_CLASS = 4
EI_DATA = 5
EI_VERSION = 6
EI_OSABI = 7
EI_ABIVERSION = 8
# possible values for EI_CLASS
ELFCLASSNONE = 0
ELFCLASS32 = 1
ELFCLASS64 = 2
# possible value for EI_VERSION
EV_CURRENT = 1
# possible values for EI_DATA
ELFDATANONE = 0
ELFDATA2LSB = 1
ELFDATA2MSB = 2
def my_assert(self, expectation, result):
if expectation != result:
raise Exception("%s: expected %r but got %r" % (self.name, expectation, result))
def __init__(self, name, bits = 0):
self.name = name
self.bits = bits
self.objdump_output = {}
def open(self):
self.file = file(self.name, "r")
self.data = self.file.read(ELFFile.EI_NIDENT+4)
self.my_assert(len(self.data), ELFFile.EI_NIDENT+4)
self.my_assert(self.data[0], chr(0x7f) )
self.my_assert(self.data[1], 'E')
self.my_assert(self.data[2], 'L')
self.my_assert(self.data[3], 'F')
if self.bits == 0:
if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32):
self.bits = 32
elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64):
self.bits = 64
else:
# Neither 32-bit nor 64-bit; refuse to guess
raise Exception("ELF but not 32 or 64 bit.")
elif self.bits == 32:
self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
elif self.bits == 64:
self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
else:
raise Exception("Must specify unknown, 32 or 64 bit size.")
self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )
self.sex = self.data[ELFFile.EI_DATA]
if self.sex == chr(ELFFile.ELFDATANONE):
raise Exception("self.sex == ELFDATANONE")
elif self.sex == chr(ELFFile.ELFDATA2LSB):
self.sex = "<"
elif self.sex == chr(ELFFile.ELFDATA2MSB):
self.sex = ">"
else:
raise Exception("Unknown self.sex")
def osAbi(self):
return ord(self.data[ELFFile.EI_OSABI])
def abiVersion(self):
return ord(self.data[ELFFile.EI_ABIVERSION])
def abiSize(self):
return self.bits
def isLittleEndian(self):
return self.sex == "<"
def isBigEngian(self):
return self.sex == ">"
def machine(self):
"""
We know the sex stored in self.sex and we
know the position
"""
import struct
(a,) = struct.unpack(self.sex+"H", self.data[18:20])
return a
def run_objdump(self, cmd, d):
import bb.process
import os
if cmd in self.objdump_output:
return self.objdump_output[cmd]
objdump = d.getVar('OBJDUMP', True)
env = os.environ.copy()
env["LC_ALL"] = "C"
env["PATH"] = d.getVar('PATH', True)
try:
bb.note("%s %s %s" % (objdump, cmd, self.name))
self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
return self.objdump_output[cmd]
except Exception as e:
bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
return ""
########NEW FILE########
__FILENAME__ = rootfs
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.package_manager import *
from oe.manifest import *
import oe.path
import filecmp
import shutil
import os
import subprocess
import re
class Rootfs(object):
"""
This is an abstract class. Do not instantiate this directly.
"""
__metaclass__ = ABCMeta
def __init__(self, d):
self.d = d
self.pm = None
self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True)
self.install_order = Manifest.INSTALL_ORDER
@abstractmethod
def _create(self):
pass
@abstractmethod
def _get_delayed_postinsts(self):
pass
@abstractmethod
def _save_postinsts(self):
pass
@abstractmethod
def _log_check(self):
pass
def _insert_feed_uris(self):
if bb.utils.contains("IMAGE_FEATURES", "package-management",
True, False, self.d):
self.pm.insert_feeds_uris()
@abstractmethod
def _handle_intercept_failure(self, failed_script):
pass
"""
The _cleanup() method should be used to clean-up stuff that we don't really
want to end up on target. For example, in the case of RPM, the DB locks.
The method is called, once, at the end of create() method.
"""
@abstractmethod
def _cleanup(self):
pass
def _exec_shell_cmd(self, cmd):
fakerootcmd = self.d.getVar('FAKEROOT', True)
if fakerootcmd is not None:
exec_cmd = [fakerootcmd, cmd]
else:
exec_cmd = cmd
try:
subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
return None
def create(self):
bb.note("###### Generate rootfs #######")
pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)
intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
"intercept_scripts")
bb.utils.remove(intercepts_dir, True)
bb.utils.mkdirhier(self.image_rootfs)
bb.utils.mkdirhier(self.deploy_dir_image)
shutil.copytree(self.d.expand("${COREBASE}/scripts/postinst-intercepts"),
intercepts_dir)
shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
self.deploy_dir_image +
"/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")
execute_pre_post_process(self.d, pre_process_cmds)
# call the package manager dependent create method
self._create()
sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
bb.utils.mkdirhier(sysconfdir)
with open(sysconfdir + "/version", "w+") as ver:
ver.write(self.d.getVar('BUILDNAME', True) + "\n")
self._run_intercepts()
execute_pre_post_process(self.d, post_process_cmds)
if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
True, False, self.d):
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is not None:
bb.fatal("The following packages could not be configured"
"offline and rootfs is read-only: %s" %
delayed_postinsts)
if self.d.getVar('USE_DEVFS', True) != "1":
self._create_devfs()
self._uninstall_unneeded()
self._insert_feed_uris()
self._run_ldconfig()
self._generate_kernel_module_deps()
self._cleanup()
def _uninstall_unneeded(self):
if bb.utils.contains("IMAGE_FEATURES", "package-management",
True, False, self.d):
return
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is None:
installed_pkgs_dir = self.d.expand('${WORKDIR}/installed_pkgs.txt')
pkgs_to_remove = list()
with open(installed_pkgs_dir, "r+") as installed_pkgs:
pkgs_installed = installed_pkgs.read().split('\n')
for pkg_installed in pkgs_installed[:]:
pkg = pkg_installed.split()[0]
if pkg in ["update-rc.d",
"base-passwd",
self.d.getVar("ROOTFS_BOOTSTRAP_INSTALL", True)
]:
pkgs_to_remove.append(pkg)
pkgs_installed.remove(pkg_installed)
if len(pkgs_to_remove) > 0:
self.pm.remove(pkgs_to_remove, False)
# Update installed_pkgs.txt
open(installed_pkgs_dir, "w+").write('\n'.join(pkgs_installed))
if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
self._exec_shell_cmd(["update-rc.d", "-f", "-r",
self.d.getVar('IMAGE_ROOTFS', True),
"run-postinsts", "remove"])
else:
self._save_postinsts()
self.pm.remove_packaging_data()
def _run_intercepts(self):
intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
"intercept_scripts")
bb.note("Running intercept scripts:")
os.environ['D'] = self.image_rootfs
for script in os.listdir(intercepts_dir):
script_full = os.path.join(intercepts_dir, script)
if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
continue
bb.note("> Executing %s intercept ..." % script)
try:
subprocess.check_output(script_full)
except subprocess.CalledProcessError as e:
bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
(script, e.returncode))
with open(script_full) as intercept:
registered_pkgs = None
for line in intercept.read().split("\n"):
m = re.match("^##PKGS:(.*)", line)
if m is not None:
registered_pkgs = m.group(1).strip()
break
if registered_pkgs is not None:
bb.warn("The postinstalls for the following packages "
"will be postponed for first boot: %s" %
registered_pkgs)
# call the backend dependent handler
self._handle_intercept_failure(registered_pkgs)
def _run_ldconfig(self):
if self.d.getVar('LDCONFIGDEPEND', True):
bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
'new', '-v'])
def _generate_kernel_module_deps(self):
kernel_abi_ver_file = os.path.join(self.d.getVar('STAGING_KERNEL_DIR', True),
'kernel-abiversion')
if os.path.exists(kernel_abi_ver_file):
kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules', kernel_ver)
bb.utils.mkdirhier(modules_dir)
self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs,
kernel_ver])
"""
Create devfs:
* IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file
* IMAGE_DEVICE_TABLES is a new name for a file, or list of files, seached
for in the BBPATH
If neither are specified then the default name of files/device_table-minimal.txt
is searched for in the BBPATH (same as the old version.)
"""
def _create_devfs(self):
devtable_list = []
devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
if devtable is not None:
devtable_list.append(devtable)
else:
devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
if devtables is None:
devtables = 'files/device_table-minimal.txt'
for devtable in devtables.split():
devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))
for devtable in devtable_list:
self._exec_shell_cmd(["makedevs", "-r",
self.image_rootfs, "-D", devtable])
class RpmRootfs(Rootfs):
def __init__(self, d, manifest_dir):
super(RpmRootfs, self).__init__(d)
self.manifest = RpmManifest(d, manifest_dir)
self.pm = RpmPM(d,
d.getVar('IMAGE_ROOTFS', True),
self.d.getVar('TARGET_VENDOR', True)
)
self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
if self.inc_rpm_image_gen != "1":
bb.utils.remove(self.image_rootfs, True)
else:
self.pm.recovery_packaging_data()
bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
self.pm.create_configs()
def _create_incremental(self, pkgs_initial_install):
'''
While rpm incremental image generation is enabled, remove the
unneeded packages by comparing the new install solution manifest
with the old installed manifest.
'''
if self.inc_rpm_image_gen == "1":
pkgs_to_install = list()
for pkg_type in pkgs_initial_install:
pkgs_to_install += pkgs_initial_install[pkg_type]
installed_manifest = self.pm.load_old_install_solution()
solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
pkg_to_remove = list()
for pkg in installed_manifest:
if pkg not in solution_manifest:
pkg_to_remove.append(pkg)
self.pm.update()
bb.note('incremental update -- upgrade packages in place ')
self.pm.upgrade()
if pkg_to_remove != []:
bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
self.pm.remove(pkg_to_remove)
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
# update PM index files
self.pm.write_index()
self.pm.dump_all_available_pkgs()
if self.inc_rpm_image_gen == "1":
self._create_incremental(pkgs_to_install)
self.pm.update()
pkgs = []
pkgs_attempt = []
for pkg_type in pkgs_to_install:
if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
pkgs_attempt += pkgs_to_install[pkg_type]
else:
pkgs += pkgs_to_install[pkg_type]
self.pm.install(pkgs)
self.pm.install(pkgs_attempt, True)
self.pm.install_complementary()
self._log_check()
if self.inc_rpm_image_gen == "1":
self.pm.backup_packaging_data()
self.pm.rpm_setup_smart_target_config()
def _get_delayed_postinsts(self):
postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
if os.path.isdir(postinst_dir):
files = os.listdir(postinst_dir)
for f in files:
bb.note('Delayed package scriptlet: %s' % f)
return files
return None
def _save_postinsts(self):
# this is just a stub. For RPM, the failed postinstalls are
# already saved in /etc/rpm-postinsts
pass
def _log_check(self):
r = re.compile('(unpacking of archive failed|Cannot find package|exit 1|ERR|Fail)')
log_path = self.d.expand("${T}/log.do_rootfs")
with open(log_path, 'r') as log:
found_error = 0
message = "\n"
for line in log.read().split('\n'):
if 'log_check' in line:
continue
m = r.search(line)
if m:
found_error = 1
bb.warn('log_check: There were error messages in the logfile')
bb.warn('log_check: Matched keyword: [%s]\n\n' % m.group())
if found_error >= 1 and found_error <= 5:
message += line + '\n'
found_error += 1
if found_error == 6:
bb.fatal(message)
def _handle_intercept_failure(self, registered_pkgs):
rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
bb.utils.mkdirhier(rpm_postinsts_dir)
# Save the package postinstalls in /etc/rpm-postinsts
for pkg in registered_pkgs.split():
self.pm.save_rpmpostinst(pkg)
def _cleanup(self):
# during the execution of postprocess commands, rpm is called several
# times to get the files installed, dependencies, etc. This creates the
# __db.00* (Berkeley DB files that hold locks, rpm specific environment
# settings, etc.), that should not get into the final rootfs
self.pm.unlock_rpm_db()
class DpkgRootfs(Rootfs):
def __init__(self, d, manifest_dir):
super(DpkgRootfs, self).__init__(d)
bb.utils.remove(self.image_rootfs, True)
bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
self.manifest = DpkgManifest(d, manifest_dir)
self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
d.getVar('PACKAGE_ARCHS', True),
d.getVar('DPKG_ARCH', True))
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
bb.utils.mkdirhier(alt_dir)
# update PM index files
self.pm.write_index()
self.pm.update()
for pkg_type in self.install_order:
if pkg_type in pkgs_to_install:
self.pm.install(pkgs_to_install[pkg_type],
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
self.pm.install_complementary()
self.pm.fix_broken_dependencies()
self.pm.mark_packages("installed")
self.pm.run_pre_post_installs()
def _get_delayed_postinsts(self):
pkg_list = []
with open(self.image_rootfs + "/var/lib/dpkg/status") as status:
for line in status:
m_pkg = re.match("^Package: (.*)", line)
m_status = re.match("^Status:.*unpacked", line)
if m_pkg is not None:
pkg_name = m_pkg.group(1)
elif m_status is not None:
pkg_list.append(pkg_name)
if len(pkg_list) == 0:
return None
return pkg_list
def _save_postinsts(self):
num = 0
for p in self._get_delayed_postinsts():
dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
bb.utils.mkdirhier(dst_postinst_dir)
if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
num += 1
def _handle_intercept_failure(self, registered_pkgs):
self.pm.mark_packages("unpacked", registered_pkgs.split())
def _log_check(self):
pass
def _cleanup(self):
pass
class OpkgRootfs(Rootfs):
def __init__(self, d, manifest_dir):
super(OpkgRootfs, self).__init__(d)
self.manifest = OpkgManifest(d, manifest_dir)
self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)
self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
if self._remove_old_rootfs():
bb.utils.remove(self.image_rootfs, True)
self.pm = OpkgPM(d,
self.image_rootfs,
self.opkg_conf,
self.pkg_archs)
else:
self.pm = OpkgPM(d,
self.image_rootfs,
self.opkg_conf,
self.pkg_archs)
self.pm.recover_packaging_data()
bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
def _prelink_file(self, root_dir, filename):
bb.note('prelink %s in %s' % (filename, root_dir))
prelink_cfg = oe.path.join(root_dir,
self.d.expand('${sysconfdir}/prelink.conf'))
if not os.path.exists(prelink_cfg):
shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
prelink_cfg)
cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
self._exec_shell_cmd([cmd_prelink,
'--root',
root_dir,
'-amR',
'-N',
'-c',
self.d.expand('${sysconfdir}/prelink.conf')])
def _file_equal(self, key, f1, f2):
'''
Compare two files with the same key to see if they are equal; if
not, they are duplicates coming from different packages.
1st: compare them directly;
2nd: while incremental image creation is enabled, one of the
files could have been prelinked in a previous image creation
run and thus changed, so prelink the other one and compare
them again.
'''
# Both of them are not prelinked
if filecmp.cmp(f1, f2):
return True
if self.image_rootfs not in f1:
self._prelink_file(f1.replace(key, ''), f1)
if self.image_rootfs not in f2:
self._prelink_file(f2.replace(key, ''), f2)
# Both of them are prelinked
if filecmp.cmp(f1, f2):
return True
# Not equal
return False
"""
This function was reused from the old implementation.
See commit: "image.bbclass: Added variables for multilib support." by
Lianhao Lu.
"""
def _multilib_sanity_test(self, dirs):
allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
if allow_replace is None:
allow_replace = ""
allow_rep = re.compile(re.sub("\|$", "", allow_replace))
error_prompt = "Multilib check error:"
files = {}
for dir in dirs:
for root, subfolders, subfiles in os.walk(dir):
for file in subfiles:
item = os.path.join(root, file)
key = str(os.path.join("/", os.path.relpath(item, dir)))
valid = True
if key in files:
# check whether the file is allowed to be replaced
if allow_rep.match(key):
valid = True
else:
if os.path.exists(files[key]) and \
os.path.exists(item) and \
not self._file_equal(key, files[key], item):
valid = False
bb.fatal("%s duplicate files %s %s is not the same\n" %
(error_prompt, item, files[key]))
#pass the check, add to list
if valid:
files[key] = item
def _multilib_test_install(self, pkgs):
ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
bb.utils.mkdirhier(ml_temp)
dirs = [self.image_rootfs]
for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
ml_target_rootfs = os.path.join(ml_temp, variant)
bb.utils.remove(ml_target_rootfs, True)
ml_opkg_conf = os.path.join(ml_temp,
variant + "-" + os.path.basename(self.opkg_conf))
ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
ml_pm.update()
ml_pm.install(pkgs)
dirs.append(ml_target_rootfs)
self._multilib_sanity_test(dirs)
def _remove_extra_packages(self, pkgs_initial_install):
'''
While ipk incremental image generation is enabled, remove the
unneeded packages by comparing the old full manifest from the
previously built image with the new full manifest of the current
image.
'''
if self.inc_opkg_image_gen == "1":
# Parse full manifest in previous existing image creation session
old_full_manifest = self.manifest.parse_full_manifest()
# Create full manifest for the current image session, the old one
# will be replaced by the new one.
self.manifest.create_full(self.pm)
# Parse full manifest in current image creation session
new_full_manifest = self.manifest.parse_full_manifest()
pkg_to_remove = list()
for pkg in old_full_manifest:
if pkg not in new_full_manifest:
pkg_to_remove.append(pkg)
if pkg_to_remove != []:
bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
self.pm.remove(pkg_to_remove)
def _remove_old_rootfs(self):
'''
Compare with the previous image creation run; if any of
PACKAGE_EXCLUDE, NO_RECOMMENDATIONS and BAD_RECOMMENDATIONS has
changed, the previous old image should be removed.
'''
if self.inc_opkg_image_gen != "1":
return True
vars_list_file = self.d.expand('${T}/vars_list')
old_vars_list = ""
if os.path.exists(vars_list_file):
old_vars_list = open(vars_list_file, 'r+').read()
new_vars_list = '%s:%s:%s\n' % \
((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
(self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
(self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
open(vars_list_file, 'w+').write(new_vars_list)
if old_vars_list != new_vars_list:
return True
return False
def _create(self):
pkgs_to_install = self.manifest.parse_initial_manifest()
opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)
# update PM index files, unless users provide their own feeds
if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
self.pm.write_index()
execute_pre_post_process(self.d, opkg_pre_process_cmds)
self.pm.update()
self.pm.handle_bad_recommendations()
if self.inc_opkg_image_gen == "1":
self._remove_extra_packages(pkgs_to_install)
for pkg_type in self.install_order:
if pkg_type in pkgs_to_install:
# For multilib, we perform a sanity test before final install
# If sanity test fails, it will automatically do a bb.fatal()
# and the installation will stop
if pkg_type == Manifest.PKG_TYPE_MULTILIB:
self._multilib_test_install(pkgs_to_install[pkg_type])
self.pm.install(pkgs_to_install[pkg_type],
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
self.pm.install_complementary()
execute_pre_post_process(self.d, opkg_post_process_cmds)
execute_pre_post_process(self.d, rootfs_post_install_cmds)
if self.inc_opkg_image_gen == "1":
self.pm.backup_packaging_data()
def _get_delayed_postinsts(self):
pkg_list = []
status_file = os.path.join(self.image_rootfs,
self.d.getVar('OPKGLIBDIR', True).strip('/'),
"opkg", "status")
with open(status_file) as status:
for line in status:
m_pkg = re.match("^Package: (.*)", line)
m_status = re.match("^Status:.*unpacked", line)
if m_pkg is not None:
pkg_name = m_pkg.group(1)
elif m_status is not None:
pkg_list.append(pkg_name)
if len(pkg_list) == 0:
return None
return pkg_list
def _save_postinsts(self):
num = 0
for p in self._get_delayed_postinsts():
dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
bb.utils.mkdirhier(dst_postinst_dir)
if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
num += 1
def _handle_intercept_failure(self, registered_pkgs):
self.pm.mark_packages("unpacked", registered_pkgs.split())
def _log_check(self):
pass
def _cleanup(self):
pass
def create_rootfs(d, manifest_dir=None):
env_bkp = os.environ.copy()
img_type = d.getVar('IMAGE_PKGTYPE', True)
if img_type == "rpm":
RpmRootfs(d, manifest_dir).create()
elif img_type == "ipk":
OpkgRootfs(d, manifest_dir).create()
elif img_type == "deb":
DpkgRootfs(d, manifest_dir).create()
os.environ.clear()
os.environ.update(env_bkp)
def image_list_installed_packages(d, format=None, rootfs_dir=None):
if not rootfs_dir:
rootfs_dir = d.getVar('IMAGE_ROOTFS', True)
img_type = d.getVar('IMAGE_PKGTYPE', True)
if img_type == "rpm":
return RpmPkgsList(d, rootfs_dir).list(format)
elif img_type == "ipk":
return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list(format)
elif img_type == "deb":
return DpkgPkgsList(d, rootfs_dir).list(format)
if __name__ == "__main__":
# We should be able to run this as a standalone script, from outside
# the bitbake environment. TBD.
pass
########NEW FILE########
__FILENAME__ = sdk
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.manifest import *
from oe.package_manager import *
import os
import shutil
import glob
class Sdk(object):
__metaclass__ = ABCMeta
def __init__(self, d, manifest_dir):
self.d = d
self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')
self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
self.sdk_host_sysroot = self.sdk_output
if manifest_dir is None:
self.manifest_dir = self.d.getVar("SDK_DIR", True)
else:
self.manifest_dir = manifest_dir
bb.utils.remove(self.sdk_output, True)
self.install_order = Manifest.INSTALL_ORDER
@abstractmethod
def _populate(self):
pass
def populate(self):
bb.utils.mkdirhier(self.sdk_output)
# call backend dependent implementation
self._populate()
# Don't ship any libGL in the SDK
bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
self.d.getVar('libdir_nativesdk', True).strip('/'),
"libGL*"))
# Remove broken .la files
bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
self.d.getVar('libdir_nativesdk', True).strip('/'),
"*.la"))
# Link the ld.so.cache file into the hosts filesystem
link_name = os.path.join(self.sdk_output, self.sdk_native_path,
self.sysconfdir, "ld.so.cache")
bb.utils.mkdirhier(os.path.dirname(link_name))
os.symlink("/etc/ld.so.cache", link_name)
execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
class RpmSdk(Sdk):
def __init__(self, d, manifest_dir=None):
super(RpmSdk, self).__init__(d, manifest_dir)
self.target_manifest = RpmManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_TARGET)
self.host_manifest = RpmManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
target_providename = ['/bin/sh',
'/bin/bash',
'/usr/bin/env',
'/usr/bin/perl',
'pkgconfig'
]
self.target_pm = RpmPM(d,
self.sdk_target_sysroot,
self.d.getVar('TARGET_VENDOR', True),
'target',
target_providename
)
sdk_providename = ['/bin/sh',
'/bin/bash',
'/usr/bin/env',
'/usr/bin/perl',
'pkgconfig',
'libGL.so()(64bit)',
'libGL.so'
]
self.host_pm = RpmPM(d,
self.sdk_host_sysroot,
self.d.getVar('SDK_VENDOR', True),
'host',
sdk_providename,
"SDK_PACKAGE_ARCHS",
"SDK_OS"
)
def _populate_sysroot(self, pm, manifest):
pkgs_to_install = manifest.parse_initial_manifest()
pm.create_configs()
pm.write_index()
pm.dump_all_available_pkgs()
pm.update()
for pkg_type in self.install_order:
if pkg_type in pkgs_to_install:
pm.install(pkgs_to_install[pkg_type],
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
def _populate(self):
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
self.target_pm.remove_packaging_data()
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
self.host_pm.remove_packaging_data()
# Move host RPM library data
native_rpm_state_dir = os.path.join(self.sdk_output,
self.sdk_native_path,
self.d.getVar('localstatedir_nativesdk', True).strip('/'),
"lib",
"rpm"
)
bb.utils.mkdirhier(native_rpm_state_dir)
for f in glob.glob(os.path.join(self.sdk_output,
"var",
"lib",
"rpm",
"*")):
bb.utils.movefile(f, native_rpm_state_dir)
bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
# Move host sysconfig data
native_sysconf_dir = os.path.join(self.sdk_output,
self.sdk_native_path,
self.d.getVar('sysconfdir',
True).strip('/'),
)
bb.utils.mkdirhier(native_sysconf_dir)
for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
bb.utils.movefile(f, native_sysconf_dir)
bb.utils.remove(os.path.join(self.sdk_output, "etc"), True)
class OpkgSdk(Sdk):
def __init__(self, d, manifest_dir=None):
super(OpkgSdk, self).__init__(d, manifest_dir)
self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
self.host_conf = self.d.getVar("IPKGCONF_SDK", True)
self.target_manifest = OpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_TARGET)
self.host_manifest = OpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
self.d.getVar("SDK_PACKAGE_ARCHS", True))
def _populate_sysroot(self, pm, manifest):
pkgs_to_install = manifest.parse_initial_manifest()
if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
pm.write_index()
pm.update()
for pkg_type in self.install_order:
if pkg_type in pkgs_to_install:
pm.install(pkgs_to_install[pkg_type],
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
def _populate(self):
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
bb.utils.mkdirhier(target_sysconfdir)
shutil.copy(self.target_conf, target_sysconfdir)
os.chmod(os.path.join(target_sysconfdir,
os.path.basename(self.target_conf)), 0644)
bb.utils.mkdirhier(host_sysconfdir)
shutil.copy(self.host_conf, host_sysconfdir)
os.chmod(os.path.join(host_sysconfdir,
os.path.basename(self.host_conf)), 0644)
native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
self.d.getVar('localstatedir_nativesdk', True).strip('/'),
"lib", "opkg")
bb.utils.mkdirhier(native_opkg_state_dir)
for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
bb.utils.movefile(f, native_opkg_state_dir)
bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
class DpkgSdk(Sdk):
def __init__(self, d, manifest_dir=None):
super(DpkgSdk, self).__init__(d, manifest_dir)
self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt")
self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk")
self.target_manifest = DpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_TARGET)
self.host_manifest = DpkgManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
self.d.getVar("PACKAGE_ARCHS", True),
self.d.getVar("DPKG_ARCH", True),
self.target_conf_dir)
self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
self.d.getVar("SDK_PACKAGE_ARCHS", True),
self.d.getVar("DEB_SDK_ARCH", True),
self.host_conf_dir)
def _copy_apt_dir_to(self, dst_dir):
staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)
bb.utils.remove(dst_dir, True)
shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
def _populate_sysroot(self, pm, manifest):
pkgs_to_install = manifest.parse_initial_manifest()
pm.write_index()
pm.update()
for pkg_type in self.install_order:
if pkg_type in pkgs_to_install:
pm.install(pkgs_to_install[pkg_type],
[False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
def _populate(self):
bb.note("Installing TARGET packages")
self._populate_sysroot(self.target_pm, self.target_manifest)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
"etc", "apt"))
native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
"var", "lib", "dpkg")
bb.utils.mkdirhier(native_dpkg_state_dir)
for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
bb.utils.movefile(f, native_dpkg_state_dir)
bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
def sdk_list_installed_packages(d, target, format=None, rootfs_dir=None):
if rootfs_dir is None:
sdk_output = d.getVar('SDK_OUTPUT', True)
target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')
rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
img_type = d.getVar('IMAGE_PKGTYPE', True)
if img_type == "rpm":
arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
os_var = ["SDK_OS", None][target is True]
return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list(format)
elif img_type == "ipk":
conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_Target"][target is True]
return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list(format)
elif img_type == "deb":
return DpkgPkgsList(d, rootfs_dir).list(format)
def populate_sdk(d, manifest_dir=None):
env_bkp = os.environ.copy()
img_type = d.getVar('IMAGE_PKGTYPE', True)
if img_type == "rpm":
RpmSdk(d, manifest_dir).populate()
elif img_type == "ipk":
OpkgSdk(d, manifest_dir).populate()
elif img_type == "deb":
DpkgSdk(d, manifest_dir).populate()
os.environ.clear()
os.environ.update(env_bkp)
if __name__ == "__main__":
pass
########NEW FILE########
__FILENAME__ = sstatesig
import bb.siggen
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# Return True if we should keep the dependency, False to drop it
def isNative(x):
return x.endswith("-native")
def isCross(x):
return "-cross-" in x
def isNativeSDK(x):
return x.startswith("nativesdk-")
def isKernel(fn):
inherits = " ".join(dataCache.inherits[fn])
return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
def isPackageGroup(fn):
inherits = " ".join(dataCache.inherits[fn])
return "/packagegroup.bbclass" in inherits
def isImage(fn):
return "/image.bbclass" in " ".join(dataCache.inherits[fn])
# Always include our own inter-task dependencies
if recipename == depname:
return True
# Changes to these tool recipes (patch application, fetching) are
# unlikely to affect the build output
excludelist = ['quilt-native', 'subversion-native', 'git-native']
if depname in excludelist and recipename != depname:
return False
# Exclude well defined recipe->dependency
if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
return False
# Don't change native/cross/nativesdk recipe dependencies any further
if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
return True
# Only target packages beyond here
# packagegroups are assumed to have well behaved names which don't change between architectures/tunes
if isPackageGroup(fn):
return False
# Exclude well defined machine specific configurations which don't change ABI
if depname in siggen.abisaferecipes and not isImage(fn):
return False
# Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
# if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
# is machine specific.
# Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
# and we recommend a kernel-module, we exclude the dependency.
depfn = dep.rsplit(".", 1)[0]
if dataCache and isKernel(depfn) and not isKernel(fn):
for pkg in dataCache.runrecs[fn]:
if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
return False
# Default to keep dependencies
return True
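# The "safe" recipe dependencies tested above come from a space-separated
# list of "recipe->dependency" pairs, e.g. (a hypothetical entry):
#   SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS = "gcc-cross->linux-libc-headers"
# meaning a linux-libc-headers change does not re-trigger gcc-cross.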
class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
name = "OEBasic"
def init_rundepcheck(self, data):
self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
name = "OEBasicHash"
def init_rundepcheck(self, data):
self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
# Insert these classes into siggen's namespace so it can see and select them
bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
def find_siginfo(pn, taskname, taskhashlist, d):
""" Find signature data files for comparison purposes """
import fnmatch
import glob
if taskhashlist:
hashfiles = {}
if not taskname:
# We have to derive pn and taskname
key = pn
splitit = key.split('.bb.')
taskname = splitit[1]
pn = os.path.basename(splitit[0]).split('_')[0]
if key.startswith('virtual:native:'):
pn = pn + '-native'
if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic']:
pn = pn.replace("-native", "")
filedates = {}
# First search in stamps dir
localdata = d.createCopy()
localdata.setVar('MULTIMACH_TARGET_SYS', '*')
localdata.setVar('PN', pn)
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('EXTENDPE', '')
stamp = localdata.getVar('STAMP', True)
filespec = '%s.%s.sigdata.*' % (stamp, taskname)
foundall = False
for fullpath in glob.glob(filespec):
match = False
if taskhashlist:
for taskhash in taskhashlist:
if fullpath.endswith('.%s' % taskhash):
hashfiles[taskhash] = fullpath
if len(hashfiles) == len(taskhashlist):
foundall = True
break
else:
filedates[fullpath] = os.stat(fullpath).st_mtime
if not taskhashlist or (len(filedates) < 2 and not foundall):
# That didn't work, look in sstate-cache
hashes = taskhashlist or ['*']
localdata = bb.data.createCopy(d)
for hashval in hashes:
localdata.setVar('PACKAGE_ARCH', '*')
localdata.setVar('TARGET_VENDOR', '*')
localdata.setVar('TARGET_OS', '*')
localdata.setVar('PN', pn)
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('BB_TASKHASH', hashval)
if pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
sstatename = taskname[3:]
filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)
if hashval != '*':
sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
else:
sstatedir = d.getVar('SSTATE_DIR', True)
for root, dirs, files in os.walk(sstatedir):
for fn in files:
fullpath = os.path.join(root, fn)
if fnmatch.fnmatch(fullpath, filespec):
if taskhashlist:
hashfiles[hashval] = fullpath
else:
filedates[fullpath] = os.stat(fullpath).st_mtime
if taskhashlist:
return hashfiles
else:
return filedates
bb.siggen.find_siginfo = find_siginfo
########NEW FILE########
__FILENAME__ = terminal
import logging
import oe.classutils
import shlex
from bb.process import Popen, ExecutionError
logger = logging.getLogger('BitBake.OE.Terminal')
class UnsupportedTerminal(Exception):
pass
class NoSupportedTerminals(Exception):
pass
class Registry(oe.classutils.ClassRegistry):
command = None
def __init__(cls, name, bases, attrs):
super(Registry, cls).__init__(name.lower(), bases, attrs)
@property
def implemented(cls):
return bool(cls.command)
class Terminal(Popen):
__metaclass__ = Registry
def __init__(self, sh_cmd, title=None, env=None, d=None):
fmt_sh_cmd = self.format_command(sh_cmd, title)
try:
Popen.__init__(self, fmt_sh_cmd, env=env)
except OSError as exc:
import errno
if exc.errno == errno.ENOENT:
raise UnsupportedTerminal(self.name)
else:
raise
def format_command(self, sh_cmd, title):
fmt = {'title': title or 'Terminal', 'command': sh_cmd}
if isinstance(self.command, basestring):
return shlex.split(self.command.format(**fmt))
else:
return [element.format(**fmt) for element in self.command]
class XTerminal(Terminal):
def __init__(self, sh_cmd, title=None, env=None, d=None):
Terminal.__init__(self, sh_cmd, title, env, d)
if not os.environ.get('DISPLAY'):
raise UnsupportedTerminal(self.name)
class Gnome(XTerminal):
command = 'gnome-terminal -t "{title}" -x {command}'
priority = 2
class Mate(XTerminal):
command = 'mate-terminal -t "{title}" -x {command}'
priority = 2
class Xfce(XTerminal):
command = 'xfce4-terminal -T "{title}" -e "{command}"'
priority = 2
class Konsole(XTerminal):
command = 'konsole -T "{title}" -e {command}'
priority = 2
def __init__(self, sh_cmd, title=None, env=None, d=None):
# Check version
vernum = check_konsole_version("konsole")
if vernum:
if vernum.split('.')[0] == "2":
logger.debug(1, 'Konsole from KDE 4.x will not work as devshell, skipping')
raise UnsupportedTerminal(self.name)
XTerminal.__init__(self, sh_cmd, title, env, d)
class XTerm(XTerminal):
command = 'xterm -T "{title}" -e {command}'
priority = 1
class Rxvt(XTerminal):
command = 'rxvt -T "{title}" -e {command}'
priority = 1
class Screen(Terminal):
command = 'screen -D -m -t "{title}" -S devshell {command}'
def __init__(self, sh_cmd, title=None, env=None, d=None):
s_id = "devshell_%i" % os.getpid()
self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
Terminal.__init__(self, sh_cmd, title, env, d)
msg = 'Screen started. Please connect in another terminal with ' \
'"screen -r %s"' % s_id
if (d):
bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
0.5, 10), d)
else:
logger.warn(msg)
class TmuxRunning(Terminal):
"""Open a new pane in the current running tmux window"""
name = 'tmux-running'
command = 'tmux split-window "{command}"'
priority = 2.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
if not bb.utils.which(os.getenv('PATH'), 'tmux'):
raise UnsupportedTerminal('tmux is not installed')
if not os.getenv('TMUX'):
raise UnsupportedTerminal('tmux is not running')
Terminal.__init__(self, sh_cmd, title, env, d)
class Tmux(Terminal):
"""Start a new tmux session and window"""
command = 'tmux new -d -s devshell -n devshell "{command}"'
priority = 0.75
def __init__(self, sh_cmd, title=None, env=None, d=None):
if not bb.utils.which(os.getenv('PATH'), 'tmux'):
raise UnsupportedTerminal('tmux is not installed')
# TODO: consider using a 'devshell' session shared amongst all
# devshells; if it's already there, add a new window to it.
window_name = 'devshell-%i' % os.getpid()
self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
Terminal.__init__(self, sh_cmd, title, env, d)
attach_cmd = 'tmux att -t {0}'.format(window_name)
msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
if d:
bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
else:
logger.warn(msg)
class Custom(Terminal):
command = 'false' # This is a placeholder
priority = 3
def __init__(self, sh_cmd, title=None, env=None, d=None):
self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
if self.command:
if not '{command}' in self.command:
self.command += ' {command}'
Terminal.__init__(self, sh_cmd, title, env, d)
logger.warn('Custom terminal was started.')
else:
logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
def prioritized():
return Registry.prioritized()
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
"""Spawn the first supported terminal, by priority"""
for terminal in prioritized():
try:
spawn(terminal.name, sh_cmd, title, env, d)
break
except UnsupportedTerminal:
continue
else:
raise NoSupportedTerminals()
def spawn(name, sh_cmd, title=None, env=None, d=None):
"""Spawn the specified terminal, by name"""
logger.debug(1, 'Attempting to spawn terminal "%s"', name)
try:
terminal = Registry.registry[name]
except KeyError:
raise UnsupportedTerminal(name)
pipe = terminal(sh_cmd, title, env, d)
output = pipe.communicate()[0]
if pipe.returncode != 0:
raise ExecutionError(sh_cmd, pipe.returncode, output)
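# A minimal usage sketch (editor's addition, not part of the original
# module): shows the intended calling pattern for the spawn helpers.
# The helper name _demo_spawn is hypothetical.
def _demo_spawn(d=None):
    # Prefer automatic selection by priority; report when nothing works.
    try:
        spawn_preferred('bash', title='devshell', d=d)
    except NoSupportedTerminals:
        logger.warn('no supported terminal emulator found for the devshell')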
def check_konsole_version(konsole):
import subprocess as sub
try:
p = sub.Popen(['sh', '-c', '%s --version' % konsole], stdout=sub.PIPE, stderr=sub.PIPE)
out, err = p.communicate()
ver_info = out.rstrip().split('\n')
except OSError as exc:
import errno
if exc.errno == errno.ENOENT:
return None
else:
raise
vernum = None
for ver in ver_info:
if ver.startswith('Konsole'):
vernum = ver.split(' ')[-1]
return vernum
def distro_name():
try:
p = Popen(['lsb_release', '-i'])
out, err = p.communicate()
distro = out.split(':')[1].strip().lower()
except:
distro = "unknown"
return distro
########NEW FILE########
__FILENAME__ = test_license
import unittest
import oe.license
class SeenVisitor(oe.license.LicenseVisitor):
def __init__(self):
self.seen = []
oe.license.LicenseVisitor.__init__(self)
def visit_Str(self, node):
self.seen.append(node.s)
class TestSingleLicense(unittest.TestCase):
licenses = [
"GPLv2",
"LGPL-2.0",
"Artistic",
"MIT",
"GPLv3+",
"FOO_BAR",
]
invalid_licenses = ["GPL/BSD"]
@staticmethod
def parse(licensestr):
visitor = SeenVisitor()
visitor.visit_string(licensestr)
return visitor.seen
def test_single_licenses(self):
for license in self.licenses:
licenses = self.parse(license)
self.assertListEqual(licenses, [license])
def test_invalid_licenses(self):
for license in self.invalid_licenses:
with self.assertRaises(oe.license.InvalidLicense) as cm:
self.parse(license)
self.assertEqual(cm.exception.license, license)
class TestSimpleCombinations(unittest.TestCase):
tests = {
"FOO&BAR": ["FOO", "BAR"],
"BAZ & MOO": ["BAZ", "MOO"],
"ALPHA|BETA": ["ALPHA"],
"BAZ&MOO|FOO": ["FOO"],
"FOO&BAR|BAZ": ["FOO", "BAR"],
}
preferred = ["ALPHA", "FOO", "BAR"]
def test_tests(self):
def choose(a, b):
if all(lic in self.preferred for lic in b):
return b
else:
return a
for license, expected in self.tests.items():
licenses = oe.license.flattened_licenses(license, choose)
self.assertListEqual(licenses, expected)
class TestComplexCombinations(TestSimpleCombinations):
tests = {
"FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
"(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
"((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
"(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
}
preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
########NEW FILE########
__FILENAME__ = test_path
import unittest
import oe, oe.path
import tempfile
import os
import errno
import shutil
class TestRealPath(unittest.TestCase):
DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
FILES = [ "etc/passwd", "b/file" ]
LINKS = [
( "bin", "/usr/bin", "/usr/bin" ),
( "binX", "usr/binX", "/usr/binX" ),
( "c", "broken", "/broken" ),
( "etc/passwd-1", "passwd", "/etc/passwd" ),
( "etc/passwd-2", "passwd-1", "/etc/passwd" ),
( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ),
( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ),
( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ),
( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ),
( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ),
( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ),
( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ),
( "usr/binX/prog-E", "../sbin/prog-E", None ),
( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ),
( "loop", "a/loop", None ),
( "a/loop", "../loop", None ),
( "b/test", "file/foo", "/b/file/foo" ),
]
LINKS_PHYS = [
( "./", "/", "" ),
( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
]
EXCEPTIONS = [
( "loop", errno.ELOOP ),
( "b/test", errno.ENOENT ),
]
def __del__(self):
try:
#os.system("tree -F %s" % self.tmpdir)
shutil.rmtree(self.tmpdir)
except:
pass
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
self.root = os.path.join(self.tmpdir, "R")
os.mkdir(os.path.join(self.tmpdir, "_real"))
os.symlink("_real", self.root)
for d in self.DIRS:
os.mkdir(os.path.join(self.root, d))
for f in self.FILES:
file(os.path.join(self.root, f), "w")
for l in self.LINKS:
os.symlink(l[1], os.path.join(self.root, l[0]))
def __realpath(self, file, use_physdir, assume_dir = True):
return oe.path.realpath(os.path.join(self.root, file), self.root,
use_physdir, assume_dir = assume_dir)
def test_norm(self):
for l in self.LINKS:
if l[2] == None:
continue
target_p = self.__realpath(l[0], True)
target_l = self.__realpath(l[0], False)
if l[2] != False:
self.assertEqual(target_p, target_l)
self.assertEqual(l[2], target_p[len(self.root):])
def test_phys(self):
for l in self.LINKS_PHYS:
target_p = self.__realpath(l[0], True)
target_l = self.__realpath(l[0], False)
self.assertEqual(l[1], target_p[len(self.root):])
self.assertEqual(l[2], target_l[len(self.root):])
def test_loop(self):
for e in self.EXCEPTIONS:
self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1],
self.__realpath, e[0], False, False)
########NEW FILE########
__FILENAME__ = test_types
import unittest
from oe.maketype import create, factory
class TestTypes(unittest.TestCase):
def assertIsInstance(self, obj, cls):
return self.assertTrue(isinstance(obj, cls))
def assertIsNot(self, obj, other):
return self.assertFalse(obj is other)
def assertFactoryCreated(self, value, type, **flags):
cls = factory(type)
self.assertIsNot(cls, None)
self.assertIsInstance(create(value, type, **flags), cls)
class TestBooleanType(TestTypes):
def test_invalid(self):
self.assertRaises(ValueError, create, '', 'boolean')
self.assertRaises(ValueError, create, 'foo', 'boolean')
self.assertRaises(TypeError, create, object(), 'boolean')
def test_true(self):
self.assertTrue(create('y', 'boolean'))
self.assertTrue(create('yes', 'boolean'))
self.assertTrue(create('1', 'boolean'))
self.assertTrue(create('t', 'boolean'))
self.assertTrue(create('true', 'boolean'))
self.assertTrue(create('TRUE', 'boolean'))
self.assertTrue(create('truE', 'boolean'))
def test_false(self):
self.assertFalse(create('n', 'boolean'))
self.assertFalse(create('no', 'boolean'))
self.assertFalse(create('0', 'boolean'))
self.assertFalse(create('f', 'boolean'))
self.assertFalse(create('false', 'boolean'))
self.assertFalse(create('FALSE', 'boolean'))
self.assertFalse(create('faLse', 'boolean'))
def test_bool_equality(self):
self.assertEqual(create('n', 'boolean'), False)
self.assertNotEqual(create('n', 'boolean'), True)
self.assertEqual(create('y', 'boolean'), True)
self.assertNotEqual(create('y', 'boolean'), False)
class TestList(TestTypes):
def assertListEqual(self, value, valid, sep=None):
obj = create(value, 'list', separator=sep)
self.assertEqual(obj, valid)
if sep is not None:
self.assertEqual(obj.separator, sep)
self.assertEqual(str(obj), obj.separator.join(obj))
def test_list_nosep(self):
testlist = ['alpha', 'beta', 'theta']
self.assertListEqual('alpha beta theta', testlist)
self.assertListEqual('alpha beta\ttheta', testlist)
self.assertListEqual('alpha', ['alpha'])
def test_list_usersep(self):
self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
########NEW FILE########
__FILENAME__ = test_utils
import unittest
from oe.utils import packages_filter_out_system, trim_version
class TestPackagesFilterOutSystem(unittest.TestCase):
def test_filter(self):
"""
Test that oe.utils.packages_filter_out_system works.
"""
try:
import bb
except ImportError:
self.skipTest("Cannot import bb")
d = bb.data_smart.DataSmart()
d.setVar("PN", "foo")
d.setVar("PACKAGES", "foo foo-doc foo-dev")
pkgs = packages_filter_out_system(d)
self.assertEqual(pkgs, [])
d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
pkgs = packages_filter_out_system(d)
self.assertEqual(pkgs, ["foo-data"])
d.setVar("PACKAGES", "foo foo-locale-en-gb")
pkgs = packages_filter_out_system(d)
self.assertEqual(pkgs, [])
d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
pkgs = packages_filter_out_system(d)
self.assertEqual(pkgs, ["foo-data"])
class TestTrimVersion(unittest.TestCase):
def test_version_exception(self):
with self.assertRaises(TypeError):
trim_version(None, 2)
with self.assertRaises(TypeError):
trim_version((1, 2, 3), 2)
def test_num_exception(self):
with self.assertRaises(ValueError):
trim_version("1.2.3", 0)
with self.assertRaises(ValueError):
trim_version("1.2.3", -1)
def test_valid(self):
self.assertEqual(trim_version("1.2.3", 1), "1")
self.assertEqual(trim_version("1.2.3", 2), "1.2")
self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
########NEW FILE########
__FILENAME__ = types
import errno
import re
import os
class OEList(list):
"""OpenEmbedded 'list' type
Acts as an ordinary list, but is constructed from a string value and a
separator (optional), and re-joins itself when converted to a string with
str(). Set the variable type flag to 'list' to use this type, and the
'separator' flag may be specified (defaulting to whitespace)."""
name = "list"
def __init__(self, value, separator = None):
if value is not None:
list.__init__(self, value.split(separator))
else:
list.__init__(self)
if separator is None:
self.separator = " "
else:
self.separator = separator
def __str__(self):
return self.separator.join(self)
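# A minimal sketch (editor's addition, not part of the original module)
# showing how OEList round-trips between a separated string and a list.
# The helper name _demo_oelist is hypothetical.
def _demo_oelist():
    l = OEList("foo:bar:baz", ":")
    assert list(l) == ["foo", "bar", "baz"]   # split on the given separator
    assert str(l) == "foo:bar:baz"            # re-joined with the same separator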
def choice(value, choices):
"""OpenEmbedded 'choice' type
Acts as a multiple choice for the user. To use this, set the variable
type flag to 'choice', and set the 'choices' flag to a space separated
list of valid values."""
if not isinstance(value, basestring):
raise TypeError("choice accepts a string, not '%s'" % type(value))
value = value.lower()
choices = choices.lower()
if value not in choices.split():
raise ValueError("Invalid choice '%s'. Valid choices: %s" %
(value, choices))
return value
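# Illustrative sketch (editor's addition): choice() lower-cases the value
# and validates it against the space-separated choices, raising ValueError
# otherwise. The helper name _demo_choice is hypothetical.
def _demo_choice():
    assert choice("RPM", "rpm deb ipk") == "rpm"
    try:
        choice("pacman", "rpm deb ipk")
    except ValueError:
        pass  # expected: 'pacman' is not a valid choice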
class NoMatch(object):
"""Stub python regex pattern object which never matches anything"""
def findall(self, string, flags=0):
return None
def finditer(self, string, flags=0):
return None
def match(self, string, flags=0):
return None
def search(self, string, flags=0):
return None
def split(self, string, maxsplit=0):
return None
def sub(self, repl, string, count=0):
return None
def subn(self, repl, string, count=0):
return None
NoMatch = NoMatch()
def regex(value, regexflags=None):
"""OpenEmbedded 'regex' type
Acts as a regular expression, returning the pre-compiled regular
expression pattern object. To use this type, set the variable type flag
to 'regex', and optionally, set the 'regexflags' type to a space separated
list of the flags to control the regular expression matching (e.g.
FOO[regexflags] += 'ignorecase'). See the python documentation on the
're' module for a list of valid flags."""
flagval = 0
if regexflags:
for flag in regexflags.split():
flag = flag.upper()
try:
flagval |= getattr(re, flag)
except AttributeError:
raise ValueError("Invalid regex flag '%s'" % flag)
if not value:
# Let's ensure that the default behavior for an undefined or empty
# variable is to match nothing. If the user explicitly wants to match
# anything, they can match '.*' instead.
return NoMatch
try:
return re.compile(value, flagval)
except re.error as exc:
raise ValueError("Invalid regex value '%s': %s" %
(value, exc.args[0]))
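# Illustrative sketch (editor's addition): an empty value yields the NoMatch
# stub, and flags are resolved by name against the re module. The helper
# name _demo_regex is hypothetical.
def _demo_regex():
    assert regex("") is NoMatch            # empty/undefined matches nothing
    pat = regex("^foo", "ignorecase multiline")
    assert pat.match("FOO bar")            # IGNORECASE was applied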
def boolean(value):
"""OpenEmbedded 'boolean' type
Valid values for true: 'yes', 'y', 'true', 't', '1'
Valid values for false: 'no', 'n', 'false', 'f', '0'
"""
if not isinstance(value, basestring):
raise TypeError("boolean accepts a string, not '%s'" % type(value))
value = value.lower()
if value in ('yes', 'y', 'true', 't', '1'):
return True
elif value in ('no', 'n', 'false', 'f', '0'):
return False
raise ValueError("Invalid boolean value '%s'" % value)
def integer(value, numberbase=10):
"""OpenEmbedded 'integer' type
Defaults to base 10, but this can be specified using the optional
'numberbase' flag."""
return int(value, int(numberbase))
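# Illustrative sketch (editor's addition): the numberbase flag selects the
# radix. The helper name _demo_integer is hypothetical.
def _demo_integer():
    assert integer("42") == 42
    assert integer("ff", 16) == 255  # base-16 parsing via the flag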
_float = float
def float(value, fromhex='false'):
"""OpenEmbedded floating point type
To use this type, set the type flag to 'float', and optionally set the
'fromhex' flag to a true value (obeying the same rules as for the
'boolean' type) if the value is in base 16 rather than base 10."""
if boolean(fromhex):
return _float.fromhex(value)
else:
return _float(value)
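# Illustrative sketch (editor's addition): with a true 'fromhex' flag the
# value is parsed as a C99-style hex float. The helper name _demo_float is
# hypothetical.
def _demo_float():
    assert float("1.5") == 1.5
    assert float("0x1.8p0", "true") == 1.5  # 1 + 8/16, exponent 2**0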
def path(value, relativeto='', normalize='true', mustexist='false'):
value = os.path.join(relativeto, value)
if boolean(normalize):
value = os.path.normpath(value)
if boolean(mustexist):
try:
open(value, 'r')
except IOError as exc:
if exc.errno == errno.ENOENT:
raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
return value
########NEW FILE########
__FILENAME__ = utils
try:
# Python 2
import commands as cmdstatus
except ImportError:
# Python 3
import subprocess as cmdstatus
def read_file(filename):
try:
f = open( filename, "r" )
except IOError as reason:
return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M:
else:
data = f.read().strip()
f.close()
return data
def ifelse(condition, iftrue = True, iffalse = False):
if condition:
return iftrue
else:
return iffalse
def conditional(variable, checkvalue, truevalue, falsevalue, d):
if d.getVar(variable,1) == checkvalue:
return truevalue
else:
return falsevalue
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
if float(d.getVar(variable,1)) <= float(checkvalue):
return truevalue
else:
return falsevalue
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue)
if result <= 0:
return truevalue
else:
return falsevalue
def both_contain(variable1, variable2, checkvalue, d):
if d.getVar(variable1,1).find(checkvalue) != -1 and d.getVar(variable2,1).find(checkvalue) != -1:
return checkvalue
else:
return ""
def prune_suffix(var, suffixes, d):
# See if var ends with any of the suffixes listed and
# remove it if found
for suffix in suffixes:
if var.endswith(suffix):
var = var[:-len(suffix)]  # strip only the trailing suffix, not every occurrence
prefix = d.getVar("MLPREFIX", True)
if prefix and var.startswith(prefix):
var = var[len(prefix):]  # strip only the leading prefix, not every occurrence
return var
def str_filter(f, str, d):
from re import match
return " ".join(filter(lambda x: match(f, x, 0), str.split()))
def str_filter_out(f, str, d):
from re import match
return " ".join(filter(lambda x: not match(f, x, 0), str.split()))
def param_bool(cfg, field, dflt = None):
"""Lookup <field> in <cfg> map and convert it to a boolean; take
<dflt> when this <field> does not exist"""
value = cfg.get(field, dflt)
strvalue = str(value).lower()
if strvalue in ('yes', 'y', 'true', 't', '1'):
return True
elif strvalue in ('no', 'n', 'false', 'f', '0'):
return False
raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
def inherits(d, *classes):
"""Return True if the metadata inherits any of the specified classes"""
return any(bb.data.inherits_class(cls, d) for cls in classes)
def features_backfill(var,d):
# This construct allows the addition of new features to variable specified
# as var
# Example for var = "DISTRO_FEATURES"
# This construct allows the addition of new features to DISTRO_FEATURES
# that if not present would disable existing functionality, without
# disturbing distributions that have already set DISTRO_FEATURES.
# Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
# add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
features = (d.getVar(var, True) or "").split()
backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()
addfeatures = []
for feature in backfill:
if feature not in features and feature not in considered:
addfeatures.append(feature)
if addfeatures:
d.appendVar(var, " " + " ".join(addfeatures))
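# Illustrative sketch (editor's addition, assuming `d` is a bb datastore
# such as bb.data_smart.DataSmart()): a backfill feature is appended unless
# already present or explicitly considered. The helper name
# _demo_features_backfill is hypothetical.
def _demo_features_backfill(d):
    d.setVar("DISTRO_FEATURES", "alsa ipv4")
    d.setVar("DISTRO_FEATURES_BACKFILL", "pulseaudio sysvinit")
    d.setVar("DISTRO_FEATURES_BACKFILL_CONSIDERED", "sysvinit")
    features_backfill("DISTRO_FEATURES", d)
    assert d.getVar("DISTRO_FEATURES", True) == "alsa ipv4 pulseaudio"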
def packages_filter_out_system(d):
"""
Return a list of packages from PACKAGES with the "system" packages such as
PN-dbg PN-doc PN-locale-eb-gb removed.
"""
pn = d.getVar('PN', True)
blacklist = map(lambda suffix: pn + suffix, ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev'))
localepkg = pn + "-locale-"
pkgs = []
for pkg in d.getVar('PACKAGES', True).split():
if pkg not in blacklist and localepkg not in pkg:
pkgs.append(pkg)
return pkgs
def getstatusoutput(cmd):
return cmdstatus.getstatusoutput(cmd)
def trim_version(version, num_parts=2):
"""
Return just the first <num_parts> of <version>, split by periods. For
example, trim_version("1.2.3", 2) will return "1.2".
"""
if type(version) is not str:
raise TypeError("Version should be a string")
if num_parts < 1:
raise ValueError("Cannot split to parts < 1")
parts = version.split(".")
trimmed = ".".join(parts[:num_parts])
return trimmed
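# Illustrative sketch (editor's addition): trimming keeps the first
# num_parts dot-separated components and never pads. The helper name
# _demo_trim_version is hypothetical.
def _demo_trim_version():
    assert trim_version("1.2.3", 2) == "1.2"
    assert trim_version("1.2.3", 5) == "1.2.3"  # asking for more parts is harmless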
def cpu_count():
import multiprocessing
return multiprocessing.cpu_count()
def execute_pre_post_process(d, cmds):
if cmds is None:
return
for cmd in cmds.strip().split(';'):
cmd = cmd.strip()
if cmd != '':
bb.note("Executing %s ..." % cmd)
bb.build.exec_func(cmd, d)
########NEW FILE########
__FILENAME__ = masterimage
# Copyright (C) 2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# This module adds support to testimage.bbclass to deploy images and run
# tests using a "master image" - this is a "known good" image that is
# installed onto the device as part of initial setup and will be booted into
# with no interaction; we can then use it to deploy the image to be tested
# to a second partition before running the tests.
#
# For an example master image, see core-image-testmaster
# (meta/recipes-extended/images/core-image-testmaster.bb)
import os
import bb
import traceback
import time
import subprocess
import oeqa.targetcontrol
import oeqa.utils.sshcontrol as sshcontrol
import oeqa.utils.commands as commands
from oeqa.utils import CommandError
from abc import ABCMeta, abstractmethod
class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget):
__metaclass__ = ABCMeta
def __init__(self, d):
super(MasterImageHardwareTarget, self).__init__(d)
# target ip
addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
self.ip = addr.split(":")[0]
try:
self.port = addr.split(":")[1]
except IndexError:
self.port = None
bb.note("Target IP: %s" % self.ip)
self.server_ip = d.getVar("TEST_SERVER_IP", True)
if not self.server_ip:
try:
self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1]
except Exception as e:
bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
bb.note("Server IP: %s" % self.server_ip)
# test rootfs + kernel
self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.tar.gz')
self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("KERNEL_IMAGETYPE", True))
if not os.path.isfile(self.rootfs):
# we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
# the same as the config with which the image was built, e.g.
# you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
# and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
bb.fatal("No rootfs found. Did you build the image?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\"? \
\nExpected path: %s" % self.rootfs)
if not os.path.isfile(self.kernel):
bb.fatal("No kernel found. Expected path: %s" % self.kernel)
# master ssh connection
self.master = None
# if the user knows what they are doing, then by all means...
self.user_cmds = d.getVar("TEST_DEPLOY_CMDS", True)
self.deploy_cmds = None
# this is the name of the command that controls the power for a board
# e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
# the command should take as the last argument "off" and "on" and "cycle" (off, on)
self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD", True) or None
self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD", True) or None
self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
self.origenv = os.environ
if self.powercontrol_cmd or self.serialcontrol_cmd:
# the external script for controlling power might use ssh
# ssh + keys means we need the original user env
bborigenv = d.getVar("BB_ORIGENV", False) or {}
for key in bborigenv:
val = bborigenv.getVar(key, True)
if val is not None:
self.origenv[key] = str(val)
if self.powercontrol_cmd:
if self.powercontrol_args:
self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args)
if self.serialcontrol_cmd:
if self.serialcontrol_args:
self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd, self.serialcontrol_args)
def power_ctl(self, msg):
if self.powercontrol_cmd:
cmd = "%s %s" % (self.powercontrol_cmd, msg)
try:
commands.runCmd(cmd, assert_error=False, preexec_fn=os.setsid, env=self.origenv)
except CommandError as e:
bb.fatal(str(e))
def power_cycle(self, conn):
if self.powercontrol_cmd:
# be nice, don't just cut power
conn.run("shutdown -h now")
time.sleep(10)
self.power_ctl("cycle")
else:
status, output = conn.run("reboot")
if status != 0:
bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)
def _wait_until_booted(self):
''' Waits until the target device has booted (if we have just power cycled it) '''
# Subclasses with better methods of determining boot can override this
time.sleep(120)
def deploy(self):
# base class just sets the ssh log file for us
super(MasterImageHardwareTarget, self).deploy()
self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
status, output = self.master.run("cat /etc/masterimage")
if status != 0:
# We're not booted into the master image, so try rebooting
bb.plain("%s - booting into the master image" % self.pn)
self.power_ctl("cycle")
self._wait_until_booted()
bb.plain("%s - deploying image on target" % self.pn)
status, output = self.master.run("cat /etc/masterimage")
if status != 0:
bb.fatal("No ssh connectivity or target isn't running a master image.\n%s" % output)
if self.user_cmds:
self.deploy_cmds = self.user_cmds.split("\n")
try:
self._deploy()
except Exception as e:
bb.fatal("Failed deploying test image: %s" % e)
@abstractmethod
def _deploy(self):
pass
def start(self, params=None):
bb.plain("%s - boot test image on target" % self.pn)
self._start()
# set the ssh object for the target/test image
self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port)
bb.plain("%s - start running tests" % self.pn)
@abstractmethod
def _start(self):
pass
def stop(self):
bb.plain("%s - reboot/powercycle target" % self.pn)
self.power_cycle(self.connection)
class GummibootTarget(MasterImageHardwareTarget):
def __init__(self, d):
super(GummibootTarget, self).__init__(d)
# this is the value we need to set in the LoaderEntryOneShot EFI variable
# so the system boots the 'test' bootloader label and not the default
# The first four bytes are EFI bits, and the rest is a utf-16le string
# (EFI vars values need to be utf-16)
# $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
# 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
self.deploy_cmds = [
'mount -L boot /boot',
'mkdir -p /mnt/testrootfs',
'mount -L testrootfs /mnt/testrootfs',
'modprobe efivarfs',
'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
'cp ~/test-kernel /boot',
'rm -rf /mnt/testrootfs/*',
'tar xzvf ~/test-rootfs.tar.gz -C /mnt/testrootfs',
'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
]
def _deploy(self):
# make sure these aren't mounted
self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
# from now on, every deploy cmd should return 0
# else an exception will be thrown by sshcontrol
self.master.ignore_status = False
self.master.copy_to(self.rootfs, "~/test-rootfs.tar.gz")
self.master.copy_to(self.kernel, "~/test-kernel")
for cmd in self.deploy_cmds:
self.master.run(cmd)
def _start(self, params=None):
self.power_cycle(self.master)
# there are better ways than a timeout but this should work for now
time.sleep(120)
########NEW FILE########
__FILENAME__ = testtargetloader
import os
import types
import bb
# This class is responsible for loading a test target controller
class TestTargetLoader:
# Search oeqa.controllers module directory for and return a controller
# corresponding to the given target name.
# AttributeError raised if not found.
# ImportError raised if a provided module can not be imported.
def get_controller_module(self, target, bbpath):
controllerslist = self.get_controller_modulenames(bbpath)
bb.note("Available controller modules: %s" % str(controllerslist))
controller = self.load_controller_from_name(target, controllerslist)
return controller
# Return a list of all python modules in lib/oeqa/controllers for each
# layer in bbpath
def get_controller_modulenames(self, bbpath):
controllerslist = []
def add_controller_list(path):
if not os.path.exists(os.path.join(path, '__init__.py')):
bb.fatal('Controllers directory %s exists but is missing __init__.py' % path)
files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
for f in files:
module = 'oeqa.controllers.' + f[:-3]
if module not in controllerslist:
controllerslist.append(module)
else:
bb.warn("Duplicate controller module found for %s, only one added. Layers should create unique controller module names" % module)
for p in bbpath:
controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
bb.debug(2, 'Searching for target controllers in %s' % controllerpath)
if os.path.exists(controllerpath):
add_controller_list(controllerpath)
return controllerslist
# Search for and return a controller from given target name and
# set of module names.
# Raise AttributeError if not found.
# Raise ImportError if a provided module can not be imported
def load_controller_from_name(self, target, modulenames):
for name in modulenames:
obj = self.load_controller_from_module(target, name)
if obj:
return obj
raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames)))
# Search for and return a controller or None from given module name
def load_controller_from_module(self, target, modulename):
obj = None
# import module, allowing it to raise import exception
module = __import__(modulename, globals(), locals(), [target])
# look for target class in the module, catching any exceptions as it
# is valid that a module may not have the target class.
try:
obj = getattr(module, target)
if obj:
from oeqa.targetcontrol import BaseTarget
if not isinstance(obj, (type, types.ClassType)):
bb.warn("Target {0} found, but not of type Class".format(target))
if not issubclass(obj, BaseTarget):
bb.warn("Target {0} found, but subclass is not BaseTarget".format(target))
except:
obj = None
return obj
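# Illustrative usage sketch (editor's addition): given BBPATH split into a
# list of layer paths, look up a controller class by name. The names used
# here are hypothetical.
def _demo_load_controller(bbpath):
    loader = TestTargetLoader()
    # Raises AttributeError if no layer provides a matching controller
    return loader.get_controller_module("GummibootTarget", bbpath)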
########NEW FILE########
__FILENAME__ = oetest
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# Main unittest module used by testimage.bbclass
# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime.
# It also has some helper functions and it's responsible for actually starting the tests
import os, re, mmap
import unittest
import inspect
def loadTests(tc):
# set the context object passed from the test class
setattr(oeTest, "tc", tc)
# set ps command to use
setattr(oeRuntimeTest, "pscmd", "ps -ef" if oeTest.hasPackage("procps") else "ps")
# prepare test suite, loader and runner
suite = unittest.TestSuite()
testloader = unittest.TestLoader()
testloader.sortTestMethodsUsing = None
suite = testloader.loadTestsFromNames(tc.testslist)
return suite
def runTests(tc):
suite = loadTests(tc)
print("Test modules %s" % tc.testslist)
print("Found %s tests" % suite.countTestCases())
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(suite)
return result
class oeTest(unittest.TestCase):
longMessage = True
testFailures = []
testSkipped = []
testErrors = []
def run(self, result=None):
super(oeTest, self).run(result)
# we add the results to our own lists; the decorators use those
if len(result.failures) > len(oeTest.testFailures):
oeTest.testFailures.append(str(result.failures[-1][0]).split()[0])
if len(result.skipped) > len(oeTest.testSkipped):
oeTest.testSkipped.append(str(result.skipped[-1][0]).split()[0])
if len(result.errors) > len(oeTest.testErrors):
oeTest.testErrors.append(str(result.errors[-1][0]).split()[0])
@classmethod
def hasPackage(self, pkg):
if re.search(pkg, oeTest.tc.pkgmanifest):
return True
return False
@classmethod
def hasFeature(self,feature):
if feature in oeTest.tc.imagefeatures or \
feature in oeTest.tc.distrofeatures:
return True
else:
return False
class oeRuntimeTest(oeTest):
def __init__(self, methodName='runTest'):
self.target = oeRuntimeTest.tc.target
super(oeRuntimeTest, self).__init__(methodName)
def getmodule(pos=2):
# inspect.stack() returns a list of tuples containing frame information
# The first element of the list is the current frame; the caller is at index 1
frameinfo = inspect.stack()[pos]
modname = inspect.getmodulename(frameinfo[1])
#modname = inspect.getmodule(frameinfo[0]).__name__
return modname
def skipModule(reason, pos=2):
modname = getmodule(pos)
if modname not in oeTest.tc.testsrequired:
raise unittest.SkipTest("%s: %s" % (modname, reason))
else:
raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
"\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
"\nor the image really doesn't have the required feature/package when it should." % (modname, reason))
def skipModuleIf(cond, reason):
if cond:
skipModule(reason, 3)
def skipModuleUnless(cond, reason):
if not cond:
skipModule(reason, 3)
########NEW FILE########
__FILENAME__ = runexported
#!/usr/bin/env python
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# This script should be used outside of the build system to run image tests.
# It needs a json file as input as exported by the build.
# E.g. for an already built image:
#- export the tests:
# TEST_EXPORT_ONLY = "1"
# TEST_TARGET = "simpleremote"
# TEST_TARGET_IP = "192.168.7.2"
# TEST_SERVER_IP = "192.168.7.1"
# bitbake core-image-sato -c testimage
# Setup your target, e.g for qemu: runqemu core-image-sato
# cd build/tmp/testimage/core-image-sato
# ./runexported.py testdata.json
import sys
import os
import time
from optparse import OptionParser
try:
import simplejson as json
except ImportError:
import json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "oeqa")))
from oeqa.oetest import runTests
from oeqa.utils.sshcontrol import SSHControl
# this isn't pretty but we need a fake target object
# for running the tests externally; we don't care
# about deploy/start, only about the connection methods (run, copy)
class FakeTarget(object):
def __init__(self, d):
self.connection = None
self.ip = None
self.server_ip = None
self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime())
self.testdir = d.getVar("TEST_LOG_DIR", True)
self.pn = d.getVar("PN", True)
def exportStart(self):
self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
sshloglink = os.path.join(self.testdir, "ssh_target_log")
if os.path.islink(sshloglink):
os.unlink(sshloglink)
os.symlink(self.sshlog, sshloglink)
print("SSH log file: %s" % self.sshlog)
self.connection = SSHControl(self.ip, logfile=self.sshlog)
def run(self, cmd, timeout=None):
return self.connection.run(cmd, timeout)
def copy_to(self, localpath, remotepath):
return self.connection.copy_to(localpath, remotepath)
def copy_from(self, remotepath, localpath):
return self.connection.copy_from(remotepath, localpath)
class MyDataDict(dict):
def getVar(self, key, unused = None):
return self.get(key, "")
class TestContext(object):
def __init__(self):
self.d = None
self.target = None
def main():
usage = "usage: %prog [options] <json file>"
parser = OptionParser(usage=usage)
parser.add_option("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \
overwrite the value determined from TEST_TARGET_IP at build time")
parser.add_option("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \
overwrite the value determined from TEST_SERVER_IP at build time.")
parser.add_option("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds, that this \
the contents of what used to be DEPLOY_DIR on the build machine. If not specified it will use the value \
specified in the json if that directory actually exists or it will error out.")
parser.add_option("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \
the current dir is used. This is used for usually creating a ssh log file and a scp test file.")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Incorrect number of arguments. The one and only argument should be a json file exported by the build system")
with open(args[0], "r") as f:
loaded = json.load(f)
if options.ip:
loaded["target"]["ip"] = options.ip
if options.server_ip:
loaded["target"]["server_ip"] = options.server_ip
d = MyDataDict()
for key in loaded["d"].keys():
d[key] = loaded["d"][key]
if options.log_dir:
d["TEST_LOG_DIR"] = options.log_dir
else:
d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__))
if options.deploy_dir:
d["DEPLOY_DIR"] = options.deploy_dir
else:
if not os.path.isdir(d["DEPLOY_DIR"]):
raise Exception("The path to DEPLOY_DIR does not exists: %s" % d["DEPLOY_DIR"])
target = FakeTarget(d)
for key in loaded["target"].keys():
setattr(target, key, loaded["target"][key])
tc = TestContext()
setattr(tc, "d", d)
setattr(tc, "target", target)
for key in loaded.keys():
if key != "d" and key != "target":
setattr(tc, key, loaded[key])
target.exportStart()
runTests(tc)
return 0
if __name__ == "__main__":
try:
ret = main()
except Exception:
ret = 1
import traceback
traceback.print_exc(5)
sys.exit(ret)
########NEW FILE########
__FILENAME__ = buildcvs
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
from oeqa.utils.targetbuild import TargetBuildProject
def setUpModule():
if not oeRuntimeTest.hasFeature("tools-sdk"):
skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
class BuildCvsTest(oeRuntimeTest):
@classmethod
def setUpClass(self):
self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
"http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2")
self.project.download_archive()
@skipUnlessPassed("test_ssh")
def test_cvs(self):
self.assertEqual(self.project.run_configure(), 0,
msg="Running configure failed")
self.assertEqual(self.project.run_make(), 0,
msg="Running make failed")
self.assertEqual(self.project.run_install(), 0,
msg="Running make install failed")
@classmethod
def tearDownClass(self):
self.project.clean()
########NEW FILE########
__FILENAME__ = buildiptables
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
from oeqa.utils.targetbuild import TargetBuildProject
def setUpModule():
if not oeRuntimeTest.hasFeature("tools-sdk"):
skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
class BuildIptablesTest(oeRuntimeTest):
@classmethod
def setUpClass(self):
self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
"http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2")
self.project.download_archive()
@skipUnlessPassed("test_ssh")
def test_iptables(self):
self.assertEqual(self.project.run_configure(), 0,
msg="Running configure failed")
self.assertEqual(self.project.run_make(), 0,
msg="Running make failed")
self.assertEqual(self.project.run_install(), 0,
msg="Running make install failed")
@classmethod
def tearDownClass(self):
self.project.clean()
########NEW FILE########
__FILENAME__ = buildsudoku
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
from oeqa.utils.targetbuild import TargetBuildProject
def setUpModule():
if not oeRuntimeTest.hasFeature("tools-sdk"):
skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
class SudokuTest(oeRuntimeTest):
@classmethod
def setUpClass(self):
self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
"http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2")
self.project.download_archive()
@skipUnlessPassed("test_ssh")
def test_sudoku(self):
self.assertEqual(self.project.run_configure(), 0,
msg="Running configure failed")
self.assertEqual(self.project.run_make(), 0,
msg="Running make failed")
@classmethod
def tearDownClass(self):
self.project.clean()
########NEW FILE########
__FILENAME__ = connman
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasPackage("connman"):
skipModule("No connman package in image")
class ConnmanTest(oeRuntimeTest):
def service_status(self, service):
if oeRuntimeTest.hasFeature("systemd"):
(status, output) = self.target.run('systemctl status -l %s' % service)
return output
else:
return "Unable to get status or logs for %s" % service
@skipUnlessPassed('test_ssh')
def test_connmand_help(self):
(status, output) = self.target.run('/usr/sbin/connmand --help')
self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
@skipUnlessPassed('test_connmand_help')
def test_connmand_running(self):
(status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand')
if status != 0:
print self.service_status("connman")
self.fail("No connmand process running")
########NEW FILE########
__FILENAME__ = date
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
import re
class DateTest(oeRuntimeTest):
@skipUnlessPassed("test_ssh")
def test_date(self):
(status, output) = self.target.run('date +"%Y-%m-%d %T"')
self.assertEqual(status, 0, msg="Failed to get initial date, output: %s" % output)
oldDate = output
sampleDate = '"2016-08-09 10:00:00"'
(status, output) = self.target.run("date -s %s" % sampleDate)
self.assertEqual(status, 0, msg="Date set failed, output: %s" % output)
(status, output) = self.target.run("date -R")
p = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output)
self.assertTrue(p, msg="The date was not set correctly, output: %s" % output)
(status, output) = self.target.run('date -s "%s"' % oldDate)
self.assertEqual(status, 0, msg="Failed to reset date, output: %s" % output)
########NEW FILE########
__FILENAME__ = df
import unittest
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
class DfTest(oeRuntimeTest):
@skipUnlessPassed("test_ssh")
def test_df(self):
(status,output) = self.target.run("df / | sed -n '2p' | awk '{print $4}'")
self.assertTrue(int(output)>5120, msg="Not enough space on image. Current size is %s" % output)
########NEW FILE########
__FILENAME__ = dmesg
import unittest
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
class DmesgTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
def test_dmesg(self):
(status, output) = self.target.run('dmesg | grep -v mmci-pl18x | grep -v "error changing net interface name" | grep -iv "dma timeout" | grep -i error')
self.assertEqual(status, 1, msg = "Error messages in dmesg log: %s" % output)
########NEW FILE########
__FILENAME__ = test
import os
os.system('touch /tmp/testfile.python')
a = 9.01e+21 - 9.01e+21 + 0.01
print "the value of a is %s" % a
########NEW FILE########
__FILENAME__ = gcc
import unittest
import os
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasFeature("tools-sdk"):
skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
class GccCompileTest(oeRuntimeTest):
@classmethod
def setUpClass(self):
oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.c"), "/tmp/test.c")
oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "testmakefile"), "/tmp/testmakefile")
def test_gcc_compile(self):
(status, output) = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
self.assertEqual(status, 0, msg="gcc compile failed, output: %s" % output)
(status, output) = self.target.run('/tmp/test')
self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)
def test_gpp_compile(self):
(status, output) = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
(status, output) = self.target.run('/tmp/test')
self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)
def test_make(self):
(status, output) = self.target.run('cd /tmp; make -f testmakefile')
self.assertEqual(status, 0, msg="running make failed, output %s" % output)
@classmethod
def tearDownClass(self):
oeRuntimeTest.tc.target.run("rm /tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile")
########NEW FILE########
__FILENAME__ = kernelmodule
import unittest
import os
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasFeature("tools-sdk"):
skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
class KernelModuleTest(oeRuntimeTest):
def setUp(self):
self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod.c"), "/tmp/hellomod.c")
self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod_makefile"), "/tmp/Makefile")
@skipUnlessPassed('test_ssh')
@skipUnlessPassed('test_gcc_compile')
def test_kernel_module(self):
cmds = [
'cd /usr/src/kernel && make scripts',
'cd /tmp && make',
'cd /tmp && insmod hellomod.ko',
'lsmod | grep hellomod',
'dmesg | grep Hello',
'rmmod hellomod', 'dmesg | grep "Cleaning up hellomod"'
]
for cmd in cmds:
(status, output) = self.target.run(cmd, 900)
self.assertEqual(status, 0, msg="\n".join([cmd, output]))
def tearDown(self):
self.target.run('rm -f /tmp/Makefile /tmp/hellomod.c')
########NEW FILE########
__FILENAME__ = ldd
import unittest
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasFeature("tools-sdk"):
skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
class LddTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
def test_ldd_exists(self):
(status, output) = self.target.run('which ldd')
self.assertEqual(status, 0, msg = "ldd does not exist in PATH: which ldd: %s" % output)
@skipUnlessPassed('test_ldd_exists')
def test_ldd_rtldlist_check(self):
(status, output) = self.target.run('for i in $(which ldd | xargs cat | grep "^RTLDLIST"|cut -d\'=\' -f2|tr -d \'"\'); do test -f $i && echo $i && break; done')
self.assertEqual(status, 0, msg = "ldd path not correct or RTLDLIST files don't exist. ")
########NEW FILE########
__FILENAME__ = logrotate
# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase
# Note that the image under test must have logrotate installed
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasPackage("logrotate"):
skipModule("No logrotate package in image")
class LogrotateTest(oeRuntimeTest):
@skipUnlessPassed("test_ssh")
def test_1_logrotate_setup(self):
(status, output) = self.target.run('mkdir /home/root/logrotate_dir')
self.assertEqual(status, 0, msg = "Could not create logrotate_dir. Output: %s" % output)
(status, output) = self.target.run("sed -i 's#wtmp {#wtmp {\\n olddir /home/root/logrotate_dir#' /etc/logrotate.conf")
self.assertEqual(status, 0, msg = "Could not write to logrotate.conf file. Status and output: %s and %s)" % (status, output))
@skipUnlessPassed("test_1_logrotate_setup")
def test_2_logrotate(self):
(status, output) = self.target.run('logrotate -f /etc/logrotate.conf')
self.assertEqual(status, 0, msg = "logrotate service could not be reloaded. Status and output: %s and %s" % (status, output))
output = self.target.run('ls -la /home/root/logrotate_dir/ | wc -l')[1]
self.assertTrue(int(output)>=3, msg = "new logfile could not be created. List of files within log directory: %s" %(self.target.run('ls -la /home/root/logrotate_dir')[1]))
########NEW FILE########
__FILENAME__ = multilib
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
multilibs = oeRuntimeTest.tc.d.getVar("MULTILIBS", True) or ""
if "multilib:lib32" not in multilibs:
skipModule("this isn't a multilib:lib32 image")
class MultilibTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
def test_file_connman(self):
self.assertTrue(oeRuntimeTest.hasPackage('connman-gnome'), msg="This test assumes connman-gnome is installed")
(status, output) = self.target.run("readelf -h /usr/bin/connman-applet | sed -n '3p' | awk '{print $2}'")
self.assertEqual(output, "ELF32", msg="connman-applet isn't an ELF32 binary. readelf says: %s" % self.target.run("readelf -h /usr/bin/connman-applet")[1])
########NEW FILE########
__FILENAME__ = pam
# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase
# Note that the image under test must have "pam" in DISTRO_FEATURES
import unittest
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasFeature("pam"):
skipModule("target doesn't have 'pam' in DISTRO_FEATURES")
class PamBasicTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
def test_pam(self):
(status, output) = self.target.run('login --help')
self.assertEqual(status, 1, msg = "login command does not work as expected. Status and output:%s and %s" %(status, output))
(status, output) = self.target.run('passwd --help')
self.assertEqual(status, 6, msg = "passwd command does not work as expected. Status and output:%s and %s" %(status, output))
(status, output) = self.target.run('su --help')
self.assertEqual(status, 2, msg = "su command does not work as expected. Status and output:%s and %s" %(status, output))
(status, output) = self.target.run('useradd --help')
self.assertEqual(status, 2, msg = "useradd command does not work as expected. Status and output:%s and %s" %(status, output))
########NEW FILE########
__FILENAME__ = perl
import unittest
import os
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasPackage("perl"):
skipModule("No perl package in the image")
class PerlTest(oeRuntimeTest):
@classmethod
def setUpClass(self):
oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.pl"), "/tmp/test.pl")
def test_perl_exists(self):
(status, output) = self.target.run('which perl')
self.assertEqual(status, 0, msg="Perl binary not in PATH or not on target.")
def test_perl_works(self):
(status, output) = self.target.run('perl /tmp/test.pl')
self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)
@classmethod
def tearDownClass(self):
oeRuntimeTest.tc.target.run("rm /tmp/test.pl")
########NEW FILE########
__FILENAME__ = ping
import subprocess
import unittest
import sys
import time
from oeqa.oetest import oeRuntimeTest
class PingTest(oeRuntimeTest):
def test_ping(self):
output = ''
count = 0
endtime = time.time() + 60
while count < 5 and time.time() < endtime:
proc = subprocess.Popen("ping -c 1 %s" % self.target.ip, shell=True, stdout=subprocess.PIPE)
output += proc.communicate()[0]
if proc.poll() == 0:
count += 1
else:
count = 0
self.assertEqual(count, 5, msg = "Expected 5 consecutive replies, got %d.\nping output is:\n%s" % (count,output))
########NEW FILE########
__FILENAME__ = python
import unittest
import os
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasPackage("python"):
skipModule("No python package in the image")
class PythonTest(oeRuntimeTest):
@classmethod
def setUpClass(self):
oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.py"), "/tmp/test.py")
def test_python_exists(self):
(status, output) = self.target.run('which python')
self.assertEqual(status, 0, msg="Python binary not in PATH or not on target.")
def test_python_stdout(self):
(status, output) = self.target.run('python /tmp/test.py')
self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)
def test_python_testfile(self):
(status, output) = self.target.run('ls /tmp/testfile.python')
self.assertEqual(status, 0, msg="Python test file generate failed.")
@classmethod
def tearDownClass(self):
oeRuntimeTest.tc.target.run("rm /tmp/test.py /tmp/testfile.python")
########NEW FILE########
__FILENAME__ = rpm
import unittest
import os
import fnmatch
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasFeature("package-management"):
skipModule("rpm module skipped: target doesn't have package-management in IMAGE_FEATURES")
if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
skipModule("rpm module skipped: target doesn't have rpm as primary package manager")
class RpmBasicTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
def test_rpm_help(self):
(status, output) = self.target.run('rpm --help')
self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
@skipUnlessPassed('test_rpm_help')
def test_rpm_query(self):
(status, output) = self.target.run('rpm -q rpm')
self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
class RpmInstallRemoveTest(oeRuntimeTest):
@classmethod
def setUpClass(self):
pkgarch = oeRuntimeTest.tc.d.getVar('TUNE_PKGARCH', True).replace("-", "_")
rpmdir = os.path.join(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), "rpm", pkgarch)
# pick rpm-doc as a test file to get installed, because it's small and it will always be built for standard targets
for f in fnmatch.filter(os.listdir(rpmdir), "rpm-doc-*.%s.rpm" % pkgarch):
testrpmfile = f
oeRuntimeTest.tc.target.copy_to(os.path.join(rpmdir,testrpmfile), "/tmp/rpm-doc.rpm")
@skipUnlessPassed('test_rpm_help')
def test_rpm_install(self):
(status, output) = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
self.assertEqual(status, 0, msg="Failed to install rpm-doc package: %s" % output)
@skipUnlessPassed('test_rpm_install')
def test_rpm_remove(self):
(status,output) = self.target.run('rpm -e rpm-doc')
self.assertEqual(status, 0, msg="Failed to remove rpm-doc package: %s" % output)
@classmethod
def tearDownClass(self):
oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm')
########NEW FILE########
__FILENAME__ = scanelf
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasPackage("pax-utils"):
skipModule("pax-utils package not installed")
class ScanelfTest(oeRuntimeTest):
def setUp(self):
self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'
@skipUnlessPassed('test_ssh')
def test_scanelf_textrel(self):
# print TEXTREL information
self.scancmd += " --textrel"
(status, output) = self.target.run(self.scancmd)
self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))
@skipUnlessPassed('test_ssh')
def test_scanelf_rpath(self):
# print RPATH information
self.scancmd += " --rpath"
(status, output) = self.target.run(self.scancmd)
self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))
########NEW FILE########
__FILENAME__ = scp
import os
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import skipUnlessPassed
def setUpModule():
if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh-sshd")):
skipModule("No ssh package in image")
class ScpTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
def test_scp_file(self):
test_log_dir = oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR", True)
test_file_path = os.path.join(test_log_dir, 'test_scp_file')
with open(test_file_path, 'w') as test_scp_file:
test_scp_file.seek(2 ** 22 - 1)
test_scp_file.write(os.linesep)
(status, output) = self.target.copy_to(test_file_path, '/tmp/test_scp_file')
self.assertEqual(status, 0, msg = "File could not be copied. Output: %s" % output)
(status, output) = self.target.run("ls -la /tmp/test_scp_file")
self.assertEqual(status, 0, msg = "SCP test failed")
########NEW FILE########
__FILENAME__ = skeletoninit
# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284 testcase
# Note that the image under test must have meta-skeleton layer in bblayers and IMAGE_INSTALL_append = " service" in local.conf
import unittest
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasPackage("service"):
skipModule("No service package in image")
class SkeletonBasicTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
@unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image")
def test_skeleton_availability(self):
(status, output) = self.target.run('ls /etc/init.d/skeleton')
self.assertEqual(status, 0, msg = "skeleton init script not found. Output:\n%s " % output)
(status, output) = self.target.run('ls /usr/sbin/skeleton-test')
self.assertEqual(status, 0, msg = "skeleton-test not found. Output:\n%s" % output)
@skipUnlessPassed('test_skeleton_availability')
@unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image")
def test_skeleton_script(self):
output1 = self.target.run("/etc/init.d/skeleton start")[1]
(status, output2) = self.target.run(oeRuntimeTest.pscmd + ' | grep [s]keleton-test')
self.assertEqual(status, 0, msg = "Skeleton script could not be started:\n%s\n%s" % (output1, output2))
########NEW FILE########
__FILENAME__ = smart
import unittest
import os
import re
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
from oeqa.utils.httpserver import HTTPService
def setUpModule():
if not oeRuntimeTest.hasFeature("package-management"):
skipModule("Image doesn't have package management feature")
if not oeRuntimeTest.hasPackage("smart"):
skipModule("Image doesn't have smart installed")
if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
skipModule("Rpm is not the primary package manager")
class SmartTest(oeRuntimeTest):
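    # Helper used by all smart tests below: runs "smart <command>" on the
    # target with a generous timeout, asserts the expected exit status, and
    # fails early if smart reports an out-of-memory condition.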
@skipUnlessPassed('test_smart_help')
def smart(self, command, expected = 0):
command = 'smart %s' % command
status, output = self.target.run(command, 1500)
message = os.linesep.join([command, output])
self.assertEqual(status, expected, message)
self.assertFalse("Cannot allocate memory" in output, message)
return output
class SmartBasicTest(SmartTest):
@skipUnlessPassed('test_ssh')
def test_smart_help(self):
self.smart('--help')
def test_smart_version(self):
self.smart('--version')
def test_smart_info(self):
self.smart('info python-smartpm')
def test_smart_query(self):
self.smart('query python-smartpm')
def test_smart_search(self):
self.smart('search python-smartpm')
def test_smart_stats(self):
self.smart('stats')
class SmartRepoTest(SmartTest):
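    # These tests serve the build's DEPLOY_DIR over HTTP from the host so the
    # target can use it as a package feed for smart channel operations.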
@classmethod
def setUpClass(self):
self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip)
self.repo_server.start()
@classmethod
def tearDownClass(self):
self.repo_server.stop()
def test_smart_channel(self):
self.smart('channel', 1)
def test_smart_channel_add(self):
image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype)
pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
if arch in pkgarchs:
self.smart('channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url))
self.smart('update')
def test_smart_channel_help(self):
self.smart('channel --help')
def test_smart_channel_list(self):
self.smart('channel --list')
def test_smart_channel_show(self):
self.smart('channel --show')
def test_smart_channel_rpmsys(self):
self.smart('channel --show rpmsys')
self.smart('channel --disable rpmsys')
self.smart('channel --enable rpmsys')
@skipUnlessPassed('test_smart_channel_add')
def test_smart_install(self):
self.smart('remove -y psplash-default')
self.smart('install -y psplash-default')
@skipUnlessPassed('test_smart_install')
def test_smart_install_dependency(self):
self.smart('remove -y psplash')
self.smart('install -y psplash-default')
@skipUnlessPassed('test_smart_channel_add')
def test_smart_install_from_disk(self):
self.smart('remove -y psplash-default')
self.smart('download psplash-default')
self.smart('install -y ./psplash-default*')
@skipUnlessPassed('test_smart_channel_add')
def test_smart_install_from_http(self):
output = self.smart('download --urls psplash-default')
url = re.search('(http://.*/psplash-default.*\.rpm)', output)
self.assertTrue(url, msg="Couln't find download url in %s" % output)
self.smart('remove -y psplash-default')
self.smart('install -y %s' % url.group(0))
@skipUnlessPassed('test_smart_install')
def test_smart_reinstall(self):
self.smart('reinstall -y psplash-default')
########NEW FILE########
__FILENAME__ = ssh
import subprocess
import unittest
import sys
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh")):
skipModule("No ssh package in image")
class SshTest(oeRuntimeTest):
@skipUnlessPassed('test_ping')
def test_ssh(self):
(status, output) = self.target.run('uname -a')
self.assertEqual(status, 0, msg="SSH Test failed: %s" % output)
(status, output) = self.target.run('cat /etc/masterimage')
self.assertEqual(status, 1, msg="This isn't the right image - /etc/masterimage shouldn't be here %s" % output)
########NEW FILE########
__FILENAME__ = syslog
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasPackage("syslog"):
skipModule("No syslog package in image")
class SyslogTest(oeRuntimeTest):
@skipUnlessPassed("test_ssh")
def test_syslog_help(self):
(status,output) = self.target.run('/sbin/syslogd --help')
self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
@skipUnlessPassed("test_syslog_help")
def test_syslog_running(self):
(status,output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -i [s]yslogd')
self.assertEqual(status, 0, msg="no syslogd process, ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1])
class SyslogTestConfig(oeRuntimeTest):
@skipUnlessPassed("test_syslog_running")
def test_syslog_logger(self):
(status,output) = self.target.run('logger foobar && test -e /var/log/messages && grep foobar /var/log/messages || logread | grep foobar')
self.assertEqual(status, 0, msg="Test log string not found in /var/log/messages. Output: %s " % output)
@skipUnlessPassed("test_syslog_running")
def test_syslog_restart(self):
if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"):
(status,output) = self.target.run('/etc/init.d/syslog restart')
else:
(status,output) = self.target.run('systemctl restart syslog.service')
@skipUnlessPassed("test_syslog_restart")
@skipUnlessPassed("test_syslog_logger")
@unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image")
def test_syslog_startup_config(self):
self.target.run('echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf')
(status,output) = self.target.run('/etc/init.d/syslog restart')
self.assertEqual(status, 0, msg="Could not restart syslog service. Status and output: %s and %s" % (status,output))
(status,output) = self.target.run('logger foobar && grep foobar /var/log/test')
self.assertEqual(status, 0, msg="Test log string not found. Output: %s " % output)
self.target.run("sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf")
self.target.run('/etc/init.d/syslog restart')
########NEW FILE########
__FILENAME__ = systemd
import unittest
import re
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasFeature("systemd"):
skipModule("target doesn't have systemd in DISTRO_FEATURES")
if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True):
skipModule("systemd is not the init manager for this image")
class SystemdTest(oeRuntimeTest):
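    # Thin wrapper around "systemctl <action> <target>" on the device: checks
    # the exit status against "expected" and, when verbose is set, appends the
    # full unit status to the failure message for easier debugging.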
def systemctl(self, action = '', target = '', expected = 0, verbose = False):
command = 'systemctl %s %s' % (action, target)
status, output = self.target.run(command)
message = '\n'.join([command, output])
if status != expected and verbose:
message += self.target.run('systemctl status --full %s' % target)[1]
self.assertEqual(status, expected, message)
return output
class SystemdBasicTests(SystemdTest):
@skipUnlessPassed('test_ssh')
def test_systemd_basic(self):
self.systemctl('--version')
    @skipUnlessPassed('test_systemd_basic')
def test_systemd_list(self):
self.systemctl('list-unit-files')
def settle(self):
"""
Block until systemd has finished activating any units being activated,
or until two minutes has elapsed.
Returns a tuple, either (True, '') if all units have finished
activating, or (False, message string) if there are still units
activating (generally, failing units that restart).
"""
import time
endtime = time.time() + (60 * 2)
while True:
status, output = self.target.run('systemctl --state=activating')
if "0 loaded units listed" in output:
return (True, '')
if time.time() >= endtime:
return (False, output)
time.sleep(10)
@skipUnlessPassed('test_systemd_basic')
def test_systemd_failed(self):
settled, output = self.settle()
self.assertTrue(settled, msg="Timed out waiting for systemd to settle:\n" + output)
output = self.systemctl('list-units', '--failed')
match = re.search("0 loaded units listed", output)
if not match:
output += self.systemctl('status --full --failed')
self.assertTrue(match, msg="Some systemd units failed:\n%s" % output)
class SystemdServiceTests(SystemdTest):
@skipUnlessPassed('test_systemd_basic')
def test_systemd_status(self):
self.systemctl('status --full', 'avahi-daemon.service')
@skipUnlessPassed('test_systemd_status')
def test_systemd_stop_start(self):
self.systemctl('stop', 'avahi-daemon.service')
self.systemctl('is-active', 'avahi-daemon.service', expected=3, verbose=True)
self.systemctl('start','avahi-daemon.service')
self.systemctl('is-active', 'avahi-daemon.service', verbose=True)
@skipUnlessPassed('test_systemd_basic')
def test_systemd_disable_enable(self):
self.systemctl('disable', 'avahi-daemon.service')
self.systemctl('is-enabled', 'avahi-daemon.service', expected=1)
self.systemctl('enable', 'avahi-daemon.service')
self.systemctl('is-enabled', 'avahi-daemon.service')
########NEW FILE########
__FILENAME__ = vnc
from oeqa.oetest import oeRuntimeTest, skipModuleUnless
from oeqa.utils.decorators import *
import re
def setUpModule():
skipModuleUnless(oeRuntimeTest.hasPackage('x11vnc'), "No x11vnc package in image")
class VNCTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
def test_vnc(self):
(status, output) = self.target.run('x11vnc -display :0 -bg -o x11vnc.log')
self.assertEqual(status, 0, msg="x11vnc server failed to start: %s" % output)
port = re.search('PORT=[0-9]*', output)
self.assertTrue(port, msg="Listening port not specified in command output: %s" %output)
vncport = port.group(0).split('=')[1]
(status, output) = self.target.run('netstat -ntl | grep ":%s"' % vncport)
self.assertEqual(status, 0, msg="x11vnc server not running on port %s\n\n%s" % (vncport, self.target.run('netstat -ntl; cat x11vnc.log')[1]))
########NEW FILE########
__FILENAME__ = x32lib
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
    #check if DEFAULTTUNE is set and its value is: x86-64-x32
defaulttune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True)
if "x86-64-x32" not in defaulttune:
skipModule("DEFAULTTUNE is not set to x86-64-x32")
class X32libTest(oeRuntimeTest):
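    # An x32 binary is a 32-bit ELF (Class: ELF32) built for the 64-bit
    # instruction set (Machine: X86-64), so both readelf checks must pass.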
@skipUnlessPassed("test_ssh")
def test_x32_file(self):
status1 = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0]
status2 = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0]
self.assertTrue(status1 == 0 and status2 == 0, msg="/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" % self.target.run("readelf -h /bin/ls")[1])
########NEW FILE########
__FILENAME__ = xorg
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasFeature("x11-base"):
skipModule("target doesn't have x11 in IMAGE_FEATURES")
class XorgTest(oeRuntimeTest):
@skipUnlessPassed('test_ssh')
def test_xorg_running(self):
(status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -v xinit | grep [X]org')
self.assertEqual(status, 0, msg="Xorg does not appear to be running %s" % self.target.run(oeRuntimeTest.pscmd)[1])
@skipUnlessPassed('test_ssh')
def test_xorg_error(self):
(status, output) = self.target.run('cat /var/log/Xorg.0.log | grep -v "(EE) error," | grep -v "PreInit" | grep -v "evdev:" | grep -v "glx" | grep "(EE)"')
self.assertEqual(status, 1, msg="Errors in Xorg log: %s" % output)
########NEW FILE########
__FILENAME__ = base
# Copyright (c) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# DESCRIPTION
# Base class inherited by test classes in meta/lib/selftest
import unittest
import os
import sys
import shutil
import logging
import errno
import oeqa.utils.ftools as ftools
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
class oeSelfTest(unittest.TestCase):
log = logging.getLogger("selftest.base")
longMessage = True
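    # builddir comes from the BUILDDIR environment variable set up by
    # oe-init-build-env; testlayer_path is expected to be injected as a class
    # attribute by the oe-selftest runner before tests are instantiated.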
def __init__(self, methodName="runTest"):
self.builddir = os.environ.get("BUILDDIR")
self.localconf_path = os.path.join(self.builddir, "conf/local.conf")
self.testinc_path = os.path.join(self.builddir, "conf/selftest.inc")
self.testlayer_path = oeSelfTest.testlayer_path
self._extra_tear_down_commands = []
self._track_for_cleanup = []
super(oeSelfTest, self).__init__(methodName)
def setUp(self):
os.chdir(self.builddir)
        # we don't know what the previous test left around in config or inc
        # files if it failed, so we need a fresh start
try:
os.remove(self.testinc_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
for root, _, files in os.walk(self.testlayer_path):
for f in files:
if f == 'test_recipe.inc':
os.remove(os.path.join(root, f))
# tests might need their own setup
# but if they overwrite this one they have to call
# super each time, so let's give them an alternative
self.setUpLocal()
def setUpLocal(self):
pass
def tearDown(self):
if self._extra_tear_down_commands:
failed_extra_commands = []
for command in self._extra_tear_down_commands:
result = runCmd(command, ignore_status=True)
if not result.status == 0:
failed_extra_commands.append(command)
if failed_extra_commands:
self.log.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands)))
self.log.debug("Trying to move on.")
self._extra_tear_down_commands = []
if self._track_for_cleanup:
for path in self._track_for_cleanup:
if os.path.isdir(path):
shutil.rmtree(path)
if os.path.isfile(path):
os.remove(path)
self._track_for_cleanup = []
self.tearDownLocal()
def tearDownLocal(self):
pass
# add test specific commands to the tearDown method.
def add_command_to_tearDown(self, command):
self.log.debug("Adding command '%s' to tearDown for this test." % command)
self._extra_tear_down_commands.append(command)
# add test specific files or directories to be removed in the tearDown method
def track_for_cleanup(self, path):
self.log.debug("Adding path '%s' to be cleaned up when test is over" % path)
self._track_for_cleanup.append(path)
# write to <builddir>/conf/selftest.inc
def write_config(self, data):
self.log.debug("Writing to: %s\n%s\n" % (self.testinc_path, data))
ftools.write_file(self.testinc_path, data)
# append to <builddir>/conf/selftest.inc
def append_config(self, data):
self.log.debug("Appending to: %s\n%s\n" % (self.testinc_path, data))
ftools.append_file(self.testinc_path, data)
# remove data from <builddir>/conf/selftest.inc
def remove_config(self, data):
self.log.debug("Removing from: %s\n\%s\n" % (self.testinc_path, data))
ftools.remove_from_file(self.testinc_path, data)
    # write to meta-selftest/recipes-test/<recipe>/test_recipe.inc
def write_recipeinc(self, recipe, data):
inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
self.log.debug("Writing to: %s\n%s\n" % (inc_file, data))
ftools.write_file(inc_file, data)
    # append data to meta-selftest/recipes-test/<recipe>/test_recipe.inc
def append_recipeinc(self, recipe, data):
inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
self.log.debug("Appending to: %s\n%s\n" % (inc_file, data))
ftools.append_file(inc_file, data)
    # remove data from meta-selftest/recipes-test/<recipe>/test_recipe.inc
def remove_recipeinc(self, recipe, data):
inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
self.log.debug("Removing from: %s\n%s\n" % (inc_file, data))
ftools.remove_from_file(inc_file, data)
    # delete meta-selftest/recipes-test/<recipe>/test_recipe.inc file
def delete_recipeinc(self, recipe):
inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
self.log.debug("Deleting file: %s" % inc_file)
try:
os.remove(inc_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
########NEW FILE########
__FILENAME__ = bblayers
import unittest
import os
import logging
import re
import shutil
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd
class BitbakeLayers(oeSelfTest):
def test_bitbakelayers_showcrossdepends(self):
result = runCmd('bitbake-layers show-cross-depends')
self.assertTrue('aspell' in result.output)
def test_bitbakelayers_showlayers(self):
result = runCmd('bitbake-layers show_layers')
self.assertTrue('meta-selftest' in result.output)
def test_bitbakelayers_showappends(self):
result = runCmd('bitbake-layers show_appends')
self.assertTrue('xcursor-transparent-theme_0.1.1.bbappend' in result.output, msg='xcursor-transparent-theme_0.1.1.bbappend file was not recognised')
def test_bitbakelayers_showoverlayed(self):
result = runCmd('bitbake-layers show_overlayed')
        self.assertTrue('aspell' in result.output, msg='aspell was not reported as overlayed')
def test_bitbakelayers_flatten(self):
self.assertFalse(os.path.isdir(os.path.join(self.builddir, 'test')))
result = runCmd('bitbake-layers flatten test')
bb_file = os.path.join(self.builddir, 'test/recipes-graphics/xcursor-transparent-theme/xcursor-transparent-theme_0.1.1.bb')
self.assertTrue(os.path.isfile(bb_file))
contents = ftools.read_file(bb_file)
find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
shutil.rmtree(os.path.join(self.builddir, 'test'))
self.assertTrue(find_in_contents)
########NEW FILE########
__FILENAME__ = bbtests
import unittest
import os
import logging
import re
import shutil
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var
class BitbakeTests(oeSelfTest):
def test_run_bitbake_from_dir_1(self):
os.chdir(os.path.join(self.builddir, 'conf'))
bitbake('-e')
def test_run_bitbake_from_dir_2(self):
my_env = os.environ.copy()
my_env['BBPATH'] = my_env['BUILDDIR']
os.chdir(os.path.dirname(os.environ['BUILDDIR']))
bitbake('-e', env=my_env)
def test_event_handler(self):
self.write_config("INHERIT += \"test_events\"")
result = bitbake('m4-native')
find_build_started = re.search("NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Preparing runqueue", result.output)
find_build_completed = re.search("Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
self.assertTrue(find_build_started, msg = "Match failed in:\n%s" % result.output)
self.assertTrue(find_build_completed, msg = "Match failed in:\n%s" % result.output)
self.assertFalse('Test for bb.event.InvalidEvent' in result.output)
def test_local_sstate(self):
bitbake('m4-native -ccleansstate')
bitbake('m4-native')
bitbake('m4-native -cclean')
result = bitbake('m4-native')
find_setscene = re.search("m4-native.*do_.*_setscene", result.output)
self.assertTrue(find_setscene)
def test_bitbake_invalid_recipe(self):
result = bitbake('-b asdf', ignore_status=True)
self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output)
def test_bitbake_invalid_target(self):
result = bitbake('asdf', ignore_status=True)
self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output)
def test_warnings_errors(self):
result = bitbake('-b asdf', ignore_status=True)
find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output)
find_errors = re.search("Summary: There w.{2,3}? [1-9][0-9]* ERROR messages* shown", result.output)
        self.assertTrue(find_warnings, msg="Did not find the number of warnings at the end of the build:\n" + result.output)
        self.assertTrue(find_errors, msg="Did not find the number of errors at the end of the build:\n" + result.output)
def test_invalid_patch(self):
self.write_recipeinc('man', 'SRC_URI += "file://man-1.5h1-make.patch"')
result = bitbake('man -c patch', ignore_status=True)
self.delete_recipeinc('man')
bitbake('-cclean man')
self.assertTrue("ERROR: Function failed: patch_do_patch" in result.output)
def test_force_task(self):
bitbake('m4-native')
result = bitbake('-C compile m4-native')
look_for_tasks = ['do_compile', 'do_install', 'do_populate_sysroot']
for task in look_for_tasks:
find_task = re.search("m4-native.*%s" % task, result.output)
self.assertTrue(find_task)
def test_bitbake_g(self):
result = bitbake('-g core-image-full-cmdline')
self.assertTrue('NOTE: PN build list saved to \'pn-buildlist\'' in result.output)
self.assertTrue('openssh' in ftools.read_file(os.path.join(self.builddir, 'pn-buildlist')))
for f in ['pn-buildlist', 'pn-depends.dot', 'package-depends.dot', 'task-depends.dot']:
os.remove(f)
def test_image_manifest(self):
bitbake('core-image-minimal')
deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
manifest = os.path.join(deploydir, imagename + ".manifest")
self.assertTrue(os.path.islink(manifest), msg="No manifest file created for image")
def test_invalid_recipe_src_uri(self):
data = 'SRC_URI = "file://invalid"'
self.write_recipeinc('man', data)
bitbake('-ccleanall man')
result = bitbake('-c fetch man', ignore_status=True)
bitbake('-ccleanall man')
self.delete_recipeinc('man')
        self.assertEqual(result.status, 1, msg='Command succeeded when it should have failed')
self.assertTrue('ERROR: Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output)
self.assertTrue('ERROR: Function failed: Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.' in result.output)
def test_rename_downloaded_file(self):
data = 'SRC_URI_append = ";downloadfilename=test-aspell.tar.gz"'
self.write_recipeinc('aspell', data)
bitbake('-ccleanall aspell')
result = bitbake('-c fetch aspell', ignore_status=True)
self.delete_recipeinc('aspell')
self.assertEqual(result.status, 0)
self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz')))
self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz.done')))
bitbake('-ccleanall aspell')
########NEW FILE########
__FILENAME__ = buildhistory
import unittest
import os
import re
import shutil
import datetime
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer
class BuildhistoryBase(oeSelfTest):
def config_buildhistory(self, tmp_bh_location=False):
if (not 'buildhistory' in get_bb_var('USER_CLASSES')) and (not 'buildhistory' in get_bb_var('INHERIT')):
add_buildhistory_config = 'INHERIT += "buildhistory"\nBUILDHISTORY_COMMIT = "1"'
self.append_config(add_buildhistory_config)
if tmp_bh_location:
# Using a temporary buildhistory location for testing
tmp_bh_dir = os.path.join(self.builddir, "tmp_buildhistory_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
buildhistory_dir_config = "BUILDHISTORY_DIR = \"%s\"" % tmp_bh_dir
self.append_config(buildhistory_dir_config)
self.track_for_cleanup(tmp_bh_dir)
def run_buildhistory_operation(self, target, global_config='', target_config='', change_bh_location=False, expect_error=False, error_regex=''):
if change_bh_location:
tmp_bh_location = True
else:
tmp_bh_location = False
self.config_buildhistory(tmp_bh_location)
self.append_config(global_config)
self.append_recipeinc(target, target_config)
bitbake("-cclean %s" % target)
result = bitbake(target, ignore_status=True)
self.remove_config(global_config)
self.remove_recipeinc(target, target_config)
if expect_error:
self.assertEqual(result.status, 1, msg="Error expected for global config '%s' and target config '%s'" % (global_config, target_config))
search_for_error = re.search(error_regex, result.output)
self.assertTrue(search_for_error, msg="Could not find desired error in output: %s" % error_regex)
else:
self.assertEqual(result.status, 0, msg="Command 'bitbake %s' has failed unexpectedly: %s" % (target, result.output))
########NEW FILE########
__FILENAME__ = buildoptions
import unittest
import os
import logging
import re
from oeqa.selftest.base import oeSelfTest
from oeqa.selftest.buildhistory import BuildhistoryBase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var
import oeqa.utils.ftools as ftools
class ImageOptionsTests(oeSelfTest):
def test_incremental_image_generation(self):
bitbake("-c cleanall core-image-minimal")
self.write_config('INC_RPM_IMAGE_GEN = "1"')
self.append_config('IMAGE_FEATURES += "ssh-server-openssh"')
bitbake("core-image-minimal")
res = runCmd("grep 'Installing openssh-sshd' %s" % (os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")), ignore_status=True)
self.remove_config('IMAGE_FEATURES += "ssh-server-openssh"')
self.assertEqual(0, res.status, msg="No match for openssh-sshd in log.do_rootfs")
bitbake("core-image-minimal")
res = runCmd("grep 'Removing openssh-sshd' %s" %(os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")),ignore_status=True)
self.assertEqual(0, res.status, msg="openssh-sshd was not removed from image")
def test_rm_old_image(self):
bitbake("core-image-minimal")
deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
deploydir_files = os.listdir(deploydir)
track_original_files = []
for image_file in deploydir_files:
if imagename in image_file and os.path.islink(os.path.join(deploydir, image_file)):
track_original_files.append(os.path.realpath(os.path.join(deploydir, image_file)))
self.append_config("RM_OLD_IMAGE = \"1\"")
bitbake("-C rootfs core-image-minimal")
deploydir_files = os.listdir(deploydir)
remaining_not_expected = [path for path in track_original_files if os.path.basename(path) in deploydir_files]
        self.assertFalse(remaining_not_expected, msg="\nThe following image files were not removed: %s" % ', '.join(map(str, remaining_not_expected)))
def test_ccache_tool(self):
bitbake("ccache-native")
self.assertTrue(os.path.isfile(os.path.join(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native'), "ccache")))
self.write_config('INHERIT += "ccache"')
bitbake("m4 -c cleansstate")
bitbake("m4 -c compile")
res = runCmd("grep ccache %s" % (os.path.join(get_bb_var("WORKDIR","m4"),"temp/log.do_compile")), ignore_status=True)
self.assertEqual(0, res.status, msg="No match for ccache in m4 log.do_compile")
bitbake("ccache-native -ccleansstate")
class DiskMonTest(oeSelfTest):
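    # BB_DISKMON_DIRS entries have the form "<action>,<dir>,<space>,<inodes>";
    # the 100000G free-space threshold below is deliberately impossible to
    # satisfy, so the configured action always triggers.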
def test_stoptask_behavior(self):
self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"')
res = bitbake("m4", ignore_status = True)
self.assertTrue('ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!' in res.output)
self.assertEqual(res.status, 1)
self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},100000G,100K"')
res = bitbake("m4", ignore_status = True)
self.assertTrue('ERROR: Immediately abort since the disk space monitor action is "ABORT"!' in res.output)
self.assertEqual(res.status, 1)
self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"')
res = bitbake("m4")
self.assertTrue('WARNING: The free space' in res.output)
class SanityOptionsTest(oeSelfTest):
def test_options_warnqa_errorqa_switch(self):
bitbake("xcursor-transparent-theme -ccleansstate")
if "packages-list" not in get_bb_var("ERROR_QA"):
self.write_config("ERROR_QA_append = \" packages-list\"")
self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
res = bitbake("xcursor-transparent-theme", ignore_status=True)
self.delete_recipeinc('xcursor-transparent-theme')
self.assertTrue("ERROR: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in res.output)
self.assertEqual(res.status, 1)
self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
self.append_config('ERROR_QA_remove = "packages-list"')
self.append_config('WARN_QA_append = " packages-list"')
res = bitbake("xcursor-transparent-theme")
bitbake("xcursor-transparent-theme -ccleansstate")
self.delete_recipeinc('xcursor-transparent-theme')
self.assertTrue("WARNING: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in res.output)
def test_sanity_userspace_dependency(self):
self.append_config('WARN_QA_append = " unsafe-references-in-binaries unsafe-references-in-scripts"')
bitbake("-ccleansstate gzip nfs-utils")
res = bitbake("gzip nfs-utils")
self.assertTrue("WARNING: QA Issue: gzip" in res.output)
self.assertTrue("WARNING: QA Issue: nfs-utils" in res.output)
class BuildhistoryTests(BuildhistoryBase):
def test_buildhistory_basic(self):
self.run_buildhistory_operation('xcursor-transparent-theme')
self.assertTrue(os.path.isdir(get_bb_var('BUILDHISTORY_DIR')))
def test_buildhistory_buildtime_pr_backwards(self):
self.add_command_to_tearDown('cleanup-workdir')
target = 'xcursor-transparent-theme'
error = "ERROR: QA Issue: Package version for package %s went backwards which would break package feeds from (.*-r1 to .*-r0)" % target
self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True, error_regex=error)
########NEW FILE########
__FILENAME__ = oescripts
import datetime
import unittest
import os
import re
import shutil
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.selftest.buildhistory import BuildhistoryBase
from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer
class TestScripts(oeSelfTest):
def test_cleanup_workdir(self):
path = os.path.dirname(get_bb_var('WORKDIR', 'gzip'))
old_version_recipe = os.path.join(get_bb_var('COREBASE'), 'meta/recipes-extended/gzip/gzip_1.3.12.bb')
old_version = '1.3.12'
bitbake("-ccleansstate gzip")
bitbake("-ccleansstate -b %s" % old_version_recipe)
if os.path.exists(get_bb_var('WORKDIR', "-b %s" % old_version_recipe)):
shutil.rmtree(get_bb_var('WORKDIR', "-b %s" % old_version_recipe))
if os.path.exists(get_bb_var('WORKDIR', 'gzip')):
shutil.rmtree(get_bb_var('WORKDIR', 'gzip'))
if os.path.exists(path):
initial_contents = os.listdir(path)
else:
initial_contents = []
bitbake('gzip')
intermediary_contents = os.listdir(path)
bitbake("-b %s" % old_version_recipe)
runCmd('cleanup-workdir')
remaining_contents = os.listdir(path)
expected_contents = [x for x in intermediary_contents if x not in initial_contents]
remaining_not_expected = [x for x in remaining_contents if x not in expected_contents]
self.assertFalse(remaining_not_expected, msg="Not all necessary content has been deleted from %s: %s" % (path, ', '.join(map(str, remaining_not_expected))))
expected_not_remaining = [x for x in expected_contents if x not in remaining_contents]
self.assertFalse(expected_not_remaining, msg="The script removed extra contents from %s: %s" % (path, ', '.join(map(str, expected_not_remaining))))
class BuildhistoryDiffTests(BuildhistoryBase):
def test_buildhistory_diff(self):
self.add_command_to_tearDown('cleanup-workdir')
target = 'xcursor-transparent-theme'
self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True)
result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR'))
expected_output = 'PR changed from "r1" to "r0"'
self.assertTrue(expected_output in result.output, msg="Did not find expected output: %s" % result.output)
########NEW FILE########
__FILENAME__ = prservice
import unittest
import os
import logging
import re
import shutil
import datetime
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var
class BitbakePrTests(oeSelfTest):
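    # With the PR service enabled, PKGR has the form "r<PR>.<counter>", where
    # the trailing counter is incremented by the PR server on each rebuild;
    # get_pr_version extracts that counter from the pkgdata file.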
def get_pr_version(self, package_name):
pkgdata_dir = get_bb_var('PKGDATA_DIR')
package_data_file = os.path.join(pkgdata_dir, 'runtime', package_name)
package_data = ftools.read_file(package_data_file)
find_pr = re.search("PKGR: r[0-9]+\.([0-9]+)", package_data)
self.assertTrue(find_pr)
return int(find_pr.group(1))
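    # Stamp files are named "<stamp prefix>.<task>.<32 hex digit task hash>";
    # the hash changing between builds proves the task actually re-ran.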
def get_task_stamp(self, package_name, recipe_task):
stampdata = get_bb_var('STAMP', target=package_name).split('/')
prefix = stampdata[-1]
package_stamps_path = "/".join(stampdata[:-1])
stamps = []
for stamp in os.listdir(package_stamps_path):
find_stamp = re.match("%s\.%s\.([a-z0-9]{32})" % (prefix, recipe_task), stamp)
if find_stamp:
stamps.append(find_stamp.group(1))
        self.assertFalse(len(stamps) == 0, msg="Could not find stamp for task %s for recipe %s" % (recipe_task, package_name))
self.assertFalse(len(stamps) > 1, msg="Found multiple %s stamps for the %s recipe in the %s directory." % (recipe_task, package_name, package_stamps_path))
return str(stamps[0])
def increment_package_pr(self, package_name):
inc_data = "do_package_append() {\nbb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now()
self.write_recipeinc(package_name, inc_data)
bitbake("-ccleansstate %s" % package_name)
res = bitbake(package_name, ignore_status=True)
self.delete_recipeinc(package_name)
self.assertEqual(res.status, 0, msg=res.output)
self.assertTrue("NOTE: Started PRServer with DBfile" in res.output, msg=res.output)
def config_pr_tests(self, package_name, package_type='rpm', pr_socket='localhost:0'):
config_package_data = 'PACKAGE_CLASSES = "package_%s"' % package_type
self.write_config(config_package_data)
config_server_data = 'PRSERV_HOST = "%s"' % pr_socket
self.append_config(config_server_data)
def run_test_pr_service(self, package_name, package_type='rpm', track_task='do_package', pr_socket='localhost:0'):
self.config_pr_tests(package_name, package_type, pr_socket)
self.increment_package_pr(package_name)
pr_1 = self.get_pr_version(package_name)
stamp_1 = self.get_task_stamp(package_name, track_task)
self.increment_package_pr(package_name)
pr_2 = self.get_pr_version(package_name)
stamp_2 = self.get_task_stamp(package_name, track_task)
bitbake("-ccleansstate %s" % package_name)
self.assertTrue(pr_2 - pr_1 == 1)
self.assertTrue(stamp_1 != stamp_2)
def run_test_pr_export_import(self, package_name, replace_current_db=True):
self.config_pr_tests(package_name)
self.increment_package_pr(package_name)
pr_1 = self.get_pr_version(package_name)
exported_db_path = os.path.join(self.builddir, 'export.inc')
export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)
if replace_current_db:
current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
self.assertTrue(os.path.exists(current_db_path), msg="Path to current PR Service database is invalid: %s" % current_db_path)
os.remove(current_db_path)
import_result = runCmd("bitbake-prserv-tool import %s" % exported_db_path, ignore_status=True)
os.remove(exported_db_path)
self.assertEqual(import_result.status, 0, msg="PR Service database import failed: %s" % import_result.output)
self.increment_package_pr(package_name)
pr_2 = self.get_pr_version(package_name)
bitbake("-ccleansstate %s" % package_name)
self.assertTrue(pr_2 - pr_1 == 1)
def test_import_export_replace_db(self):
self.run_test_pr_export_import('m4')
def test_import_export_override_db(self):
self.run_test_pr_export_import('m4', replace_current_db=False)
def test_pr_service_rpm_arch_dep(self):
self.run_test_pr_service('m4', 'rpm', 'do_package')
def test_pr_service_deb_arch_dep(self):
self.run_test_pr_service('m4', 'deb', 'do_package')
def test_pr_service_ipk_arch_dep(self):
self.run_test_pr_service('m4', 'ipk', 'do_package')
def test_pr_service_rpm_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'rpm', 'do_package')
def test_pr_service_deb_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'deb', 'do_package')
def test_pr_service_ipk_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'ipk', 'do_package')
########NEW FILE########
__FILENAME__ = sstate
import datetime
import unittest
import os
import re
import shutil
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
class SStateBase(oeSelfTest):
def setUpLocal(self):
self.temp_sstate_location = None
self.sstate_path = get_bb_var('SSTATE_DIR')
self.distro = get_bb_var('NATIVELSBSTRING')
self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro)
# Creates a special sstate configuration with the option to add sstate mirrors
def config_sstate(self, temp_sstate_location=False, add_local_mirrors=[]):
self.temp_sstate_location = temp_sstate_location
if self.temp_sstate_location:
temp_sstate_path = os.path.join(self.builddir, "temp_sstate_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
config_temp_sstate = "SSTATE_DIR = \"%s\"" % temp_sstate_path
self.append_config(config_temp_sstate)
self.track_for_cleanup(temp_sstate_path)
self.sstate_path = get_bb_var('SSTATE_DIR')
self.distro = get_bb_var('NATIVELSBSTRING')
self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro)
if add_local_mirrors:
config_set_sstate_if_not_set = 'SSTATE_MIRRORS ?= ""'
self.append_config(config_set_sstate_if_not_set)
for local_mirror in add_local_mirrors:
self.assertFalse(os.path.join(local_mirror) == os.path.join(self.sstate_path), msg='Cannot add the current sstate path as a sstate mirror')
config_sstate_mirror = "SSTATE_MIRRORS += \"file://.* file:///%s/PATH\"" % local_mirror
self.append_config(config_sstate_mirror)
# Returns a list containing sstate files
def search_sstate(self, filename_regex, distro_specific=True, distro_nonspecific=True):
result = []
for root, dirs, files in os.walk(self.sstate_path):
if distro_specific and re.search("%s/[a-z0-9]{2}$" % self.distro, root):
for f in files:
if re.search(filename_regex, f):
result.append(f)
if distro_nonspecific and re.search("%s/[a-z0-9]{2}$" % self.sstate_path, root):
for f in files:
if re.search(filename_regex, f):
result.append(f)
return result
########NEW FILE########
__FILENAME__ = sstatetests
import datetime
import unittest
import os
import re
import shutil
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
from oeqa.selftest.sstate import SStateBase
class SStateTests(SStateBase):
# Test sstate files creation and their location
def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True):
self.config_sstate(temp_sstate_location)
if self.temp_sstate_location:
bitbake(['-cclean'] + targets)
else:
bitbake(['-ccleansstate'] + targets)
bitbake(targets)
file_tracker = self.search_sstate('|'.join(map(str, targets)), distro_specific, distro_nonspecific)
if should_pass:
            self.assertTrue(file_tracker, msg="Could not find sstate files for: %s" % ', '.join(map(str, targets)))
else:
            self.assertTrue(not file_tracker, msg="Found sstate files in the wrong place for: %s" % ', '.join(map(str, targets)))
def test_sstate_creation_distro_specific_pass(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
def test_sstate_creation_distro_specific_fail(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False)
def test_sstate_creation_distro_nonspecific_pass(self):
self.run_test_sstate_creation(['eglibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
def test_sstate_creation_distro_nonspecific_fail(self):
self.run_test_sstate_creation(['eglibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False)
# Test the sstate files deletion part of the do_cleansstate task
def run_test_cleansstate_task(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True):
self.config_sstate(temp_sstate_location)
bitbake(['-ccleansstate'] + targets)
bitbake(targets)
tgz_created = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
self.assertTrue(tgz_created, msg="Could not find sstate .tgz files for: %s" % ', '.join(map(str, targets)))
siginfo_created = self.search_sstate('|'.join(map(str, [s + '.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific)
self.assertTrue(siginfo_created, msg="Could not find sstate .siginfo files for: %s" % ', '.join(map(str, targets)))
bitbake(['-ccleansstate'] + targets)
tgz_removed = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
self.assertTrue(not tgz_removed, msg="do_cleansstate didn't remove .tgz sstate files for: %s" % ', '.join(map(str, targets)))
def test_cleansstate_task_distro_specific_nonspecific(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_cleansstate_task(['binutils-cross-' + targetarch, 'binutils-native', 'eglibc-initial'], distro_specific=True, distro_nonspecific=True, temp_sstate_location=True)
def test_cleansstate_task_distro_nonspecific(self):
self.run_test_cleansstate_task(['eglibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
def test_cleansstate_task_distro_specific(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_cleansstate_task(['binutils-cross-'+ targetarch, 'binutils-native', 'eglibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
# Test rebuilding of distro-specific sstate files
def run_test_rebuild_distro_specific_sstate(self, targets, temp_sstate_location=True):
self.config_sstate(temp_sstate_location)
bitbake(['-ccleansstate'] + targets)
bitbake(targets)
self.assertTrue(self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True) == [], msg="Found distro non-specific sstate for: %s" % ', '.join(map(str, targets)))
file_tracker_1 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
        self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
self.track_for_cleanup(self.distro_specific_sstate + "_old")
shutil.copytree(self.distro_specific_sstate, self.distro_specific_sstate + "_old")
shutil.rmtree(self.distro_specific_sstate)
bitbake(['-cclean'] + targets)
bitbake(targets)
file_tracker_2 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
        self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2]
        self.assertTrue(not_recreated == [], msg="The following sstate files were not recreated: %s" % ', '.join(map(str, not_recreated)))
created_once = [x for x in file_tracker_2 if x not in file_tracker_1]
        self.assertTrue(created_once == [], msg="The following sstate files were created only in the second run: %s" % ', '.join(map(str, created_once)))
def test_rebuild_distro_specific_sstate_cross_native_targets(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch, 'binutils-native'], temp_sstate_location=True)
def test_rebuild_distro_specific_sstate_cross_target(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch], temp_sstate_location=True)
def test_rebuild_distro_specific_sstate_native_target(self):
self.run_test_rebuild_distro_specific_sstate(['binutils-native'], temp_sstate_location=True)
# Test the sstate-cache-management script. Each element in the global_config list is used with the corresponding element in the target_config list
# global_config elements are expected to not generate any sstate files that would be removed by sstate-cache-management.sh (such as changing the value of MACHINE)
def run_test_sstate_cache_management_script(self, target, global_config=[''], target_config=[''], ignore_patterns=[]):
self.assertTrue(global_config)
self.assertTrue(target_config)
self.assertTrue(len(global_config) == len(target_config), msg='Lists global_config and target_config should have the same number of elements')
self.config_sstate(temp_sstate_location=True, add_local_mirrors=[self.sstate_path])
# If buildhistory is enabled, we need to disable version-going-backwards QA checks for this test. It may report errors otherwise.
if ('buildhistory' in get_bb_var('USER_CLASSES')) or ('buildhistory' in get_bb_var('INHERIT')):
remove_errors_config = 'ERROR_QA_remove = "version-going-backwards"'
self.append_config(remove_errors_config)
        # For now this only checks if random sstate tasks are handled correctly as a group.
# In the future we should add control over what tasks we check for.
sstate_archs_list = []
expected_remaining_sstate = []
for idx in range(len(target_config)):
self.append_config(global_config[idx])
self.append_recipeinc(target, target_config[idx])
sstate_arch = get_bb_var('SSTATE_PKGARCH', target)
if not sstate_arch in sstate_archs_list:
sstate_archs_list.append(sstate_arch)
if target_config[idx] == target_config[-1]:
target_sstate_before_build = self.search_sstate(target + '.*?\.tgz$')
bitbake("-cclean %s" % target)
result = bitbake(target, ignore_status=True)
if target_config[idx] == target_config[-1]:
target_sstate_after_build = self.search_sstate(target + '.*?\.tgz$')
expected_remaining_sstate += [x for x in target_sstate_after_build if x not in target_sstate_before_build if not any(pattern in x for pattern in ignore_patterns)]
self.remove_config(global_config[idx])
self.remove_recipeinc(target, target_config[idx])
self.assertEqual(result.status, 0)
runCmd("sstate-cache-management.sh -y --cache-dir=%s --remove-duplicated --extra-archs=%s" % (self.sstate_path, ','.join(map(str, sstate_archs_list))))
actual_remaining_sstate = [x for x in self.search_sstate(target + '.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)]
actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate]
        self.assertFalse(actual_not_expected, msg="Files should have been removed but were not: %s" % ', '.join(map(str, actual_not_expected)))
expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate]
        self.assertFalse(expected_not_actual, msg="Extra files were removed: %s" % ', '.join(map(str, expected_not_actual)))
def test_sstate_cache_management_script_using_pr_1(self):
global_config = []
target_config = []
global_config.append('')
target_config.append('PR = "0"')
self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
def test_sstate_cache_management_script_using_pr_2(self):
global_config = []
target_config = []
global_config.append('')
target_config.append('PR = "0"')
global_config.append('')
target_config.append('PR = "1"')
self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
def test_sstate_cache_management_script_using_pr_3(self):
global_config = []
target_config = []
global_config.append('MACHINE = "qemux86-64"')
target_config.append('PR = "0"')
global_config.append(global_config[0])
target_config.append('PR = "1"')
global_config.append('MACHINE = "qemux86"')
target_config.append('PR = "1"')
self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
def test_sstate_cache_management_script_using_machine(self):
global_config = []
target_config = []
global_config.append('MACHINE = "qemux86-64"')
target_config.append('')
global_config.append('MACHINE = "qemux86"')
target_config.append('')
self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
########NEW FILE########
__FILENAME__ = _sstatetests_noauto
import datetime
import unittest
import os
import re
import shutil
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
from oeqa.selftest.sstate import SStateBase
class RebuildFromSState(SStateBase):
@classmethod
def setUpClass(self):
self.builddir = os.path.join(os.environ.get('BUILDDIR'))
def get_dep_targets(self, primary_targets):
found_targets = []
bitbake("-g " + ' '.join(map(str, primary_targets)))
with open(os.path.join(self.builddir, 'pn-buildlist'), 'r') as pnfile:
found_targets = pnfile.read().splitlines()
return found_targets
def configure_builddir(self, builddir):
os.mkdir(builddir)
self.track_for_cleanup(builddir)
os.mkdir(os.path.join(builddir, 'conf'))
shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/local.conf'), os.path.join(builddir, 'conf/local.conf'))
config = {}
config['default_sstate_dir'] = "SSTATE_DIR ?= \"${TOPDIR}/sstate-cache\""
config['null_sstate_mirrors'] = "SSTATE_MIRRORS = \"\""
config['default_tmp_dir'] = "TMPDIR = \"${TOPDIR}/tmp\""
for key in config:
ftools.append_file(os.path.join(builddir, 'conf/selftest.inc'), config[key])
shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/bblayers.conf'), os.path.join(builddir, 'conf/bblayers.conf'))
try:
shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/auto.conf'), os.path.join(builddir, 'conf/auto.conf'))
except:
pass
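    # Mirror the sstate tree into dst using hard links instead of copies, so
    # each test build gets a private cache directory without duplicating the
    # (potentially large) sstate archives on disk.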
def hardlink_tree(self, src, dst):
os.mkdir(dst)
self.track_for_cleanup(dst)
for root, dirs, files in os.walk(src):
if root == src:
continue
os.mkdir(os.path.join(dst, root.split(src)[1][1:]))
for sstate_file in files:
os.link(os.path.join(root, sstate_file), os.path.join(dst, root.split(src)[1][1:], sstate_file))
def run_test_sstate_rebuild(self, primary_targets, relocate=False, rebuild_dependencies=False):
buildA = os.path.join(self.builddir, 'buildA')
if relocate:
buildB = os.path.join(self.builddir, 'buildB')
else:
buildB = buildA
if rebuild_dependencies:
rebuild_targets = self.get_dep_targets(primary_targets)
else:
rebuild_targets = primary_targets
self.configure_builddir(buildA)
runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildA)) + 'bitbake ' + ' '.join(map(str, primary_targets)), shell=True, executable='/bin/bash')
self.hardlink_tree(os.path.join(buildA, 'sstate-cache'), os.path.join(self.builddir, 'sstate-cache-buildA'))
shutil.rmtree(buildA)
failed_rebuild = []
failed_cleansstate = []
for target in rebuild_targets:
self.configure_builddir(buildB)
self.hardlink_tree(os.path.join(self.builddir, 'sstate-cache-buildA'), os.path.join(buildB, 'sstate-cache'))
result_cleansstate = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake -ccleansstate ' + target, ignore_status=True, shell=True, executable='/bin/bash')
if not result_cleansstate.status == 0:
failed_cleansstate.append(target)
shutil.rmtree(buildB)
continue
result_build = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake ' + target, ignore_status=True, shell=True, executable='/bin/bash')
if not result_build.status == 0:
failed_rebuild.append(target)
shutil.rmtree(buildB)
self.assertFalse(failed_rebuild, msg="The following recipes have failed to rebuild: %s" % ' '.join(map(str, failed_rebuild)))
self.assertFalse(failed_cleansstate, msg="The following recipes have failed cleansstate(all others have passed both cleansstate and rebuild from sstate tests): %s" % ' '.join(map(str, failed_cleansstate)))
def test_sstate_relocation(self):
self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=True, rebuild_dependencies=True)
def test_sstate_rebuild(self):
self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=False, rebuild_dependencies=True)
########NEW FILE########
__FILENAME__ = targetcontrol
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# This module is used by testimage.bbclass for setting up and controlling a target machine.
import os
import shutil
import subprocess
import bb
import traceback
import sys
from oeqa.utils.sshcontrol import SSHControl
from oeqa.utils.qemurunner import QemuRunner
from oeqa.controllers.testtargetloader import TestTargetLoader
from abc import ABCMeta, abstractmethod
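# Resolve TEST_TARGET to a controller class: the legacy names "qemu" and
# "simpleremote" map to the built-in classes below; anything else is looked up
# first in this module and then in controller modules provided by layers.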
def get_target_controller(d):
testtarget = d.getVar("TEST_TARGET", True)
# old, simple names
if testtarget == "qemu":
return QemuTarget(d)
elif testtarget == "simpleremote":
return SimpleRemoteTarget(d)
else:
# use the class name
try:
# is it a core class defined here?
controller = getattr(sys.modules[__name__], testtarget)
except AttributeError:
# nope, perhaps a layer defined one
try:
bbpath = d.getVar("BBPATH", True).split(':')
testtargetloader = TestTargetLoader()
controller = testtargetloader.get_controller_module(testtarget, bbpath)
except ImportError as e:
bb.fatal("Failed to import {0} from available controller modules:\n{1}".format(testtarget,traceback.format_exc()))
except AttributeError as e:
bb.fatal("Invalid TEST_TARGET - " + str(e))
return controller(d)
class BaseTarget(object):
__metaclass__ = ABCMeta
def __init__(self, d):
self.connection = None
self.ip = None
self.server_ip = None
self.datetime = d.getVar('DATETIME', True)
self.testdir = d.getVar("TEST_LOG_DIR", True)
self.pn = d.getVar("PN", True)
@abstractmethod
def deploy(self):
self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
sshloglink = os.path.join(self.testdir, "ssh_target_log")
if os.path.islink(sshloglink):
os.unlink(sshloglink)
os.symlink(self.sshlog, sshloglink)
bb.note("SSH log file: %s" % self.sshlog)
@abstractmethod
def start(self, params=None):
pass
@abstractmethod
def stop(self):
pass
def restart(self, params=None):
self.stop()
self.start(params)
def run(self, cmd, timeout=None):
return self.connection.run(cmd, timeout)
def copy_to(self, localpath, remotepath):
return self.connection.copy_to(localpath, remotepath)
def copy_from(self, remotepath, localpath):
return self.connection.copy_from(remotepath, localpath)
class QemuTarget(BaseTarget):
def __init__(self, d):
super(QemuTarget, self).__init__(d)
self.qemulog = os.path.join(self.testdir, "qemu_boot_log.%s" % self.datetime)
self.origrootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.ext3')
self.rootfs = os.path.join(self.testdir, d.getVar("IMAGE_LINK_NAME", True) + '-testimage.ext3')
self.runner = QemuRunner(machine=d.getVar("MACHINE", True),
rootfs=self.rootfs,
tmpdir = d.getVar("TMPDIR", True),
deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE", True),
display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True),
logfile = self.qemulog,
boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT", True)))
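    # deploy() works on a copy of the built rootfs image, so test runs can
    # modify the filesystem without corrupting the original deploy artifact.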
def deploy(self):
try:
shutil.copyfile(self.origrootfs, self.rootfs)
except Exception as e:
bb.fatal("Error copying rootfs: %s" % e)
qemuloglink = os.path.join(self.testdir, "qemu_boot_log")
if os.path.islink(qemuloglink):
os.unlink(qemuloglink)
os.symlink(self.qemulog, qemuloglink)
bb.note("rootfs file: %s" % self.rootfs)
bb.note("Qemu log file: %s" % self.qemulog)
super(QemuTarget, self).deploy()
def start(self, params=None):
if self.runner.start(params):
self.ip = self.runner.ip
self.server_ip = self.runner.server_ip
self.connection = SSHControl(ip=self.ip, logfile=self.sshlog)
else:
self.stop()
raise bb.build.FuncFailed("%s - FAILED to start qemu - check the task log and the boot log" % self.pn)
def stop(self):
self.runner.stop()
self.connection = None
self.ip = None
self.server_ip = None
def restart(self, params=None):
if self.runner.restart(params):
self.ip = self.runner.ip
self.server_ip = self.runner.server_ip
self.connection = SSHControl(ip=self.ip, logfile=self.sshlog)
else:
raise bb.build.FuncFailed("%s - FAILED to re-start qemu - check the task log and the boot log" % self.pn)
class SimpleRemoteTarget(BaseTarget):
def __init__(self, d):
super(SimpleRemoteTarget, self).__init__(d)
addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
self.ip = addr.split(":")[0]
try:
self.port = addr.split(":")[1]
except IndexError:
self.port = None
bb.note("Target IP: %s" % self.ip)
self.server_ip = d.getVar("TEST_SERVER_IP", True)
if not self.server_ip:
try:
self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1]
except Exception as e:
bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
bb.note("Server IP: %s" % self.server_ip)
def deploy(self):
super(SimpleRemoteTarget, self).deploy()
def start(self, params=None):
self.connection = SSHControl(self.ip, logfile=self.sshlog, port=self.port)
def stop(self):
self.connection = None
self.ip = None
self.server_ip = None
########NEW FILE########
__FILENAME__ = commands
# Copyright (c) 2013-2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
# It provides a class and methods for running commands on the host in a convenient way for tests.
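#
# A minimal usage sketch (illustrative only; assumes a configured build
# environment with "bitbake" on PATH):
#
#   from oeqa.utils.commands import runCmd, bitbake, get_bb_var
#
#   result = runCmd("echo hello")      # raises AssertionError on non-zero status
#   print result.output                # -> "hello"
#   bitbake("core-image-minimal")      # shorthand for runCmd("bitbake core-image-minimal")
#   tmpdir = get_bb_var("TMPDIR")      # parsed from "bitbake -e" output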
import os
import sys
import signal
import subprocess
import threading
import logging
from oeqa.utils import CommandError
class Command(object):
def __init__(self, command, bg=False, timeout=None, data=None, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"stdin": None,
"shell": False,
"bufsize": -1,
}
self.cmd = command
self.bg = bg
self.timeout = timeout
self.data = data
self.options = dict(self.defaultopts)
if isinstance(self.cmd, basestring):
self.options["shell"] = True
if self.data:
self.options['stdin'] = subprocess.PIPE
self.options.update(options)
self.status = None
self.output = None
self.error = None
self.thread = None
self.log = logging.getLogger("utils.commands")
def run(self):
self.process = subprocess.Popen(self.cmd, **self.options)
def commThread():
self.output, self.error = self.process.communicate(self.data)
self.thread = threading.Thread(target=commThread)
self.thread.start()
self.log.debug("Running command '%s'" % self.cmd)
if not self.bg:
self.thread.join(self.timeout)
self.stop()
def stop(self):
if self.thread.isAlive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
self.thread.join(5)
if self.thread.isAlive():
self.process.kill()
self.thread.join()
self.output = self.output.rstrip()
self.status = self.process.poll()
self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
# logging the complete output is insane
# bitbake -e output is really big
# and makes the log file useless
if self.status:
lout = "\n".join(self.output.splitlines()[-20:])
self.log.debug("Last 20 lines:\n%s" % lout)
class Result(object):
pass
def runCmd(command, ignore_status=False, timeout=None, assert_error=True, **options):
result = Result()
cmd = Command(command, timeout=timeout, **options)
cmd.run()
result.command = command
result.status = cmd.status
result.output = cmd.output
result.pid = cmd.process.pid
if result.status and not ignore_status:
if assert_error:
raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))
else:
raise CommandError(result.status, command, result.output)
return result
def bitbake(command, ignore_status=False, timeout=None, **options):
if isinstance(command, basestring):
cmd = "bitbake " + command
else:
cmd = [ "bitbake" ] + command
return runCmd(cmd, ignore_status, timeout, **options)
def get_bb_env(target=None):
if target:
return runCmd("bitbake -e %s" % target).output
else:
return runCmd("bitbake -e").output
def get_bb_var(var, target=None):
val = None
bbenv = get_bb_env(target)
for line in bbenv.splitlines():
if line.startswith(var + "="):
val = line.split('=', 1)[1]
val = val.replace('\"','')
break
return val
def get_test_layer():
layers = get_bb_var("BBLAYERS").split()
testlayer = None
for l in layers:
if "/meta-selftest" in l and os.path.isdir(l):
testlayer = l
break
return testlayer
########NEW FILE########
__FILENAME__ = decorators
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# Some custom decorators that can be used by unittests
# Most useful is skipUnlessPassed which can be used for
# creating dependencies between two test methods.
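#
# A minimal usage sketch (illustrative only; the test class and method
# names are hypothetical):
#
#   class MyRuntimeTests(oeRuntimeTest):
#       def test_ssh(self):
#           ...
#
#       @skipUnlessPassed('test_ssh')
#       def test_something_over_ssh(self):
#           # skipped automatically if test_ssh was skipped, failed or errored
#           ...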
from oeqa.oetest import *
class skipIfFailure(object):
def __init__(self,testcase):
self.testcase = testcase
def __call__(self,f):
def wrapped_f(*args):
if self.testcase in oeTest.testFailures or self.testcase in oeTest.testErrors:
raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
return f(*args)
wrapped_f.__name__ = f.__name__
return wrapped_f
class skipIfSkipped(object):
def __init__(self,testcase):
self.testcase = testcase
def __call__(self,f):
def wrapped_f(*args):
if self.testcase in oeTest.testSkipped:
raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
return f(*args)
wrapped_f.__name__ = f.__name__
return wrapped_f
class skipUnlessPassed(object):
def __init__(self,testcase):
self.testcase = testcase
def __call__(self,f):
def wrapped_f(*args):
if self.testcase in oeTest.testSkipped or \
self.testcase in oeTest.testFailures or \
self.testcase in oeTest.testErrors:
raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
return f(*args)
wrapped_f.__name__ = f.__name__
return wrapped_f
########NEW FILE########
__FILENAME__ = ftools
import os
import re
def write_file(path, data):
wdata = data.rstrip() + "\n"
with open(path, "w") as f:
f.write(wdata)
def append_file(path, data):
wdata = data.rstrip() + "\n"
with open(path, "a") as f:
f.write(wdata)
def read_file(path):
data = None
with open(path) as f:
data = f.read()
return data
def remove_from_file(path, data):
lines = read_file(path).splitlines()
rmdata = data.strip().splitlines()
for l in rmdata:
for c in range(0, lines.count(l)):
i = lines.index(l)
del(lines[i])
write_file(path, "\n".join(lines))
########NEW FILE########
__FILENAME__ = httpserver
import SimpleHTTPServer
import multiprocessing
import os
class HTTPServer(SimpleHTTPServer.BaseHTTPServer.HTTPServer):
def server_start(self, root_dir):
os.chdir(root_dir)
self.serve_forever()
class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def log_message(self, format_str, *args):
pass
class HTTPService(object):
def __init__(self, root_dir, host=''):
self.root_dir = root_dir
self.host = host
self.port = 0
def start(self):
self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
if self.port == 0:
self.port = self.server.server_port
self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir])
self.process.start()
def stop(self):
self.server.server_close()
self.process.terminate()
self.process.join()
########NEW FILE########
__FILENAME__ = qemurunner
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# This module provides a class for starting qemu images using runqemu.
# It's used by testimage.bbclass.
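#
# A minimal usage sketch (illustrative only; all constructor arguments are
# placeholders - in practice they come from the BitBake datastore, as in
# QemuTarget above):
#
#   runner = QemuRunner(machine="qemux86", rootfs="/path/to/image.ext3",
#                       display=":0", tmpdir="/path/to/tmp",
#                       deploy_dir_image="/path/to/deploy/images",
#                       logfile="/tmp/qemu_boot.log", boottime=500)
#   if runner.start():
#       print("target ip: %s" % runner.ip)
#       runner.stop()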
import subprocess
import os
import time
import signal
import re
import socket
import select
import bb
class QemuRunner:
def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime):
# Popen object for runqemu
self.runqemu = None
# pid of the qemu process that runqemu will start
self.qemupid = None
# target ip - from the command line
self.ip = None
# host ip - where qemu is running
self.server_ip = None
self.machine = machine
self.rootfs = rootfs
self.display = display
self.tmpdir = tmpdir
self.deploy_dir_image = deploy_dir_image
self.logfile = logfile
self.boottime = boottime
self.runqemutime = 60
self.create_socket()
def create_socket(self):
self.bootlog = ''
self.qemusock = None
try:
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setblocking(0)
self.server_socket.bind(("127.0.0.1",0))
self.server_socket.listen(2)
self.serverport = self.server_socket.getsockname()[1]
bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
except socket.error, msg:
self.server_socket.close()
bb.fatal("Failed to create listening socket: %s" %msg[1])
def log(self, msg):
if self.logfile:
with open(self.logfile, "a") as f:
f.write("%s" % msg)
def start(self, qemuparams = None):
if self.display:
os.environ["DISPLAY"] = self.display
else:
bb.error("To start qemu I need a X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
return False
if not os.path.exists(self.rootfs):
bb.error("Invalid rootfs %s" % self.rootfs)
return False
if not os.path.exists(self.tmpdir):
bb.error("Invalid TMPDIR path %s" % self.tmpdir)
return False
else:
os.environ["OE_TMPDIR"] = self.tmpdir
if not os.path.exists(self.deploy_dir_image):
bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
return False
else:
os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
# Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
# badly with screensavers.
os.environ["QEMU_DONT_GRAB"] = "1"
self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport
if qemuparams:
self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'
launch_cmd = 'runqemu %s %s %s' % (self.machine, self.rootfs, self.qemuparams)
self.runqemu = subprocess.Popen(launch_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,preexec_fn=os.setpgrp)
bb.note("runqemu started, pid is %s" % self.runqemu.pid)
bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
endtime = time.time() + self.runqemutime
while not self.is_alive() and time.time() < endtime:
time.sleep(1)
if self.is_alive():
bb.note("qemu started - qemu procces pid is %s" % self.qemupid)
cmdline = ''
with open('/proc/%s/cmdline' % self.qemupid) as p:
cmdline = p.read()
ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
if not ips or len(ips) != 3:
bb.note("Couldn't get ip from qemu process arguments! Here is the qemu command line used: %s" % cmdline)
self.stop()
return False
else:
self.ip = ips[0]
self.server_ip = ips[1]
bb.note("Target IP: %s" % self.ip)
bb.note("Server IP: %s" % self.server_ip)
bb.note("Waiting at most %d seconds for login banner" % self.boottime )
endtime = time.time() + self.boottime
socklist = [self.server_socket]
reachedlogin = False
stopread = False
while time.time() < endtime and not stopread:
sread, swrite, serror = select.select(socklist, [], [], 5)
for sock in sread:
if sock is self.server_socket:
self.qemusock, addr = self.server_socket.accept()
self.qemusock.setblocking(0)
socklist.append(self.qemusock)
socklist.remove(self.server_socket)
bb.note("Connection from %s:%s" % addr)
else:
data = sock.recv(1024)
if data:
self.log(data)
self.bootlog += data
if re.search("qemu.* login:", self.bootlog):
stopread = True
reachedlogin = True
bb.note("Reached login banner")
else:
socklist.remove(sock)
sock.close()
stopread = True
if not reachedlogin:
bb.note("Target didn't reached login boot in %d seconds" % self.boottime)
lines = "\n".join(self.bootlog.splitlines()[-5:])
bb.note("Last 5 lines of text:\n%s" % lines)
bb.note("Check full boot log: %s" % self.logfile)
self.stop()
return False
else:
bb.note("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
output = self.runqemu.stdout
self.stop()
bb.note("Output from runqemu:\n%s" % output.read())
return False
return self.is_alive()
def stop(self):
if self.runqemu:
bb.note("Sending SIGTERM to runqemu")
os.killpg(self.runqemu.pid, signal.SIGTERM)
endtime = time.time() + self.runqemutime
while self.runqemu.poll() is None and time.time() < endtime:
time.sleep(1)
if self.runqemu.poll() is None:
bb.note("Sending SIGKILL to runqemu")
os.killpg(self.runqemu.pid, signal.SIGKILL)
self.runqemu = None
if self.server_socket:
self.server_socket.close()
self.server_socket = None
self.qemupid = None
self.ip = None
def restart(self, qemuparams = None):
bb.note("Restarting qemu process")
if self.runqemu.poll() is None:
self.stop()
self.create_socket()
if self.start(qemuparams):
return True
return False
def is_alive(self):
qemu_child = self.find_child(str(self.runqemu.pid))
if qemu_child:
self.qemupid = qemu_child[0]
if os.path.exists("/proc/" + str(self.qemupid)):
return True
return False
def find_child(self,parent_pid):
#
# Walk the process tree from the process specified looking for a qemu-system. Return its [pid, cmd].
#
ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
processes = ps.split('\n')
nfields = len(processes[0].split()) - 1
pids = {}
commands = {}
for row in processes[1:]:
data = row.split(None, nfields)
if len(data) != 3:
continue
if data[1] not in pids:
pids[data[1]] = []
pids[data[1]].append(data[0])
commands[data[0]] = data[2]
if parent_pid not in pids:
return []
parents = []
newparents = pids[parent_pid]
while newparents:
next = []
for p in newparents:
if p in pids:
for n in pids[p]:
if n not in parents and n not in next:
next.append(n)
if p not in parents:
parents.append(p)
newparents = next
#print "Children matching %s:" % str(parents)
for p in parents:
# Need to be careful here since runqemu-internal runs "ldd qemu-system-xxxx"
# Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
basecmd = commands[p].split()[0]
basecmd = os.path.basename(basecmd)
if "qemu-system" in basecmd and "-serial tcp" in commands[p]:
return [int(p),commands[p]]
########NEW FILE########
__FILENAME__ = sshcontrol
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# Provides a class for setting up ssh connections,
# running commands and copying files to/from a target.
# It's used by testimage.bbclass and tests in lib/oeqa/runtime.
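#
# A minimal usage sketch (illustrative only; the IP address and paths are
# placeholders):
#
#   ssh = SSHControl("192.168.7.2", logfile="/tmp/ssh.log")
#   status, output = ssh.run("uname -a")             # uses the default timeout
#   status, output = ssh.run("long-running-cmd", 0)  # 0 = no timeout
#   ssh.copy_to("/tmp/local.file", "/home/root/")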
import subprocess
import time
import os
import select
class SSHProcess(object):
def __init__(self, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"stdin": None,
"shell": False,
"bufsize": -1,
"preexec_fn": os.setsid,
}
self.options = dict(self.defaultopts)
self.options.update(options)
self.status = None
self.output = None
self.process = None
self.starttime = None
self.logfile = None
def log(self, msg):
if self.logfile:
with open(self.logfile, "a") as f:
f.write("%s" % msg)
def run(self, command, timeout=None, logfile=None):
self.logfile = logfile
self.starttime = time.time()
output = ''
self.process = subprocess.Popen(command, **self.options)
if timeout:
endtime = self.starttime + timeout
eof = False
while time.time() < endtime and not eof:
if select.select([self.process.stdout], [], [], 5)[0] != []:
data = os.read(self.process.stdout.fileno(), 1024)
if not data:
self.process.stdout.close()
eof = True
else:
output += data
self.log(data)
endtime = time.time() + timeout
# process hasn't returned yet
if not eof:
self.process.terminate()
time.sleep(5)
try:
self.process.kill()
except OSError:
pass
lastline = "\nProcess killed - no output for %d seconds. Total running time: %d seconds." % (timeout, time.time() - self.starttime)
self.log(lastline)
output += lastline
else:
output = self.process.communicate()[0]
self.log(output.rstrip())
self.status = self.process.wait()
self.output = output.rstrip()
return (self.status, self.output)
class SSHControl(object):
def __init__(self, ip, logfile=None, timeout=300, user='root', port=None):
self.ip = ip
self.defaulttimeout = timeout
self.ignore_status = True
self.logfile = logfile
self.user = user
self.ssh_options = [
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-o', 'LogLevel=ERROR'
]
self.ssh = ['ssh', '-l', self.user ] + self.ssh_options
self.scp = ['scp'] + self.ssh_options
if port:
self.ssh = self.ssh + [ '-p', port ]
self.scp = self.scp + [ '-P', port ]
def log(self, msg):
if self.logfile:
with open(self.logfile, "a") as f:
f.write("%s\n" % msg)
def _internal_run(self, command, timeout=None, ignore_status = True):
self.log("[Running]$ %s" % " ".join(command))
proc = SSHProcess()
status, output = proc.run(command, timeout, logfile=self.logfile)
self.log("[Command returned '%d' after %.2f seconds]" % (status, time.time() - proc.starttime))
if status and not ignore_status:
raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, status, output))
return (status, output)
def run(self, command, timeout=None):
"""
command - ssh command to run
timeout=<val> - kill command if there is no output after <val> seconds
timeout=None - kill command if there is no output after a default number of seconds
timeout=0 - no timeout, let command run until it returns
"""
# We need to source /etc/profile for a proper PATH on the target
command = self.ssh + [self.ip, ' . /etc/profile; ' + command]
if timeout is None:
return self._internal_run(command, self.defaulttimeout, self.ignore_status)
if timeout == 0:
return self._internal_run(command, None, self.ignore_status)
return self._internal_run(command, timeout, self.ignore_status)
def copy_to(self, localpath, remotepath):
command = self.scp + [localpath, '%s@%s:%s' % (self.user, self.ip, remotepath)]
return self._internal_run(command, ignore_status=False)
def copy_from(self, remotepath, localpath):
command = self.scp + ['%s@%s:%s' % (self.user, self.ip, remotepath), localpath]
return self._internal_run(command, ignore_status=False)
########NEW FILE########
__FILENAME__ = targetbuild
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# Provides a class for automating build tests for projects
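#
# A minimal usage sketch (illustrative only; 'target' is an already started
# target object such as QemuTarget, 'd' is the BitBake datastore and the
# URI is a placeholder):
#
#   project = TargetBuildProject(target, d, "http://example.com/foo-1.0.tar.gz")
#   project.download_archive()
#   assert project.run_configure() == 0
#   assert project.run_make() == 0
#   assert project.run_install() == 0
#   project.clean()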
import os
import re
import subprocess
class TargetBuildProject():
def __init__(self, target, d, uri, foldername=None):
self.target = target
self.d = d
self.uri = uri
self.targetdir = "~/"
self.archive = os.path.basename(uri)
self.localarchive = "/tmp/" + self.archive
self.fname = re.sub(r'\.tar\.(bz2|gz)$', '', self.archive)
if foldername:
self.fname = foldername
def download_archive(self):
exportvars = ['HTTP_PROXY', 'http_proxy',
'HTTPS_PROXY', 'https_proxy',
'FTP_PROXY', 'ftp_proxy',
'FTPS_PROXY', 'ftps_proxy',
'NO_PROXY', 'no_proxy',
'ALL_PROXY', 'all_proxy',
'SOCKS5_USER', 'SOCKS5_PASSWD']
cmd = ''
for var in exportvars:
val = self.d.getVar(var, True)
if val:
cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
cmd = cmd + "wget -O %s %s" % (self.localarchive, self.uri)
subprocess.check_call(cmd, shell=True)
(status, output) = self.target.copy_to(self.localarchive, self.targetdir)
if status != 0:
raise Exception("Failed to copy archive to target, output: %s" % output)
(status, output) = self.target.run('tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir))
if status != 0:
raise Exception("Failed to extract archive, output: %s" % output)
# Change targetdir to the project folder
self.targetdir = self.targetdir + self.fname
# The timeout parameter of target.run is set to 0 to make the ssh command
# run with no timeout.
def run_configure(self):
return self.target.run('cd %s; ./configure' % self.targetdir, 0)[0]
def run_make(self):
return self.target.run('cd %s; make' % self.targetdir, 0)[0]
def run_install(self):
return self.target.run('cd %s; make install' % self.targetdir, 0)[0]
def clean(self):
self.target.run('rm -rf %s' % self.targetdir)
subprocess.call('rm -f %s' % self.localarchive, shell=True)
########NEW FILE########
__FILENAME__ = sitecustomize
# OpenEmbedded sitecustomize.py (C) 2002-2008 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# GPLv2 or later
# Version: 20081123
# Features:
# * set proper default encoding
# * enable readline completion in the interactive interpreter
# * load command line history on startup
# * save command line history on exit
import os
def __exithandler():
try:
readline.write_history_file( "%s/.python-history" % os.getenv( "HOME", "/tmp" ) )
except IOError:
pass
def __registerExitHandler():
import atexit
atexit.register( __exithandler )
def __enableReadlineSupport():
readline.set_history_length( 1000 )
readline.parse_and_bind( "tab: complete" )
try:
readline.read_history_file( "%s/.python-history" % os.getenv( "HOME", "/tmp" ) )
except IOError:
pass
def __enableDefaultEncoding():
import sys
try:
sys.setdefaultencoding( "utf8" )
except LookupError:
pass
import sys
try:
import rlcompleter, readline
except ImportError:
pass
else:
__enableDefaultEncoding()
__registerExitHandler()
__enableReadlineSupport()
########NEW FILE########
__FILENAME__ = sitecustomize
# OpenEmbedded sitecustomize.py (C) 2002-2008 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# GPLv2 or later
# Version: 20081123
# Features:
# * set proper default encoding
# * enable readline completion in the interactive interpreter
# * load command line history on startup
# * save command line history on exit
import os
def __exithandler():
try:
readline.write_history_file( "%s/.python-history" % os.getenv( "HOME", "/tmp" ) )
except IOError:
pass
def __registerExitHandler():
import atexit
atexit.register( __exithandler )
def __enableReadlineSupport():
readline.set_history_length( 1000 )
readline.parse_and_bind( "tab: complete" )
try:
readline.read_history_file( "%s/.python-history" % os.getenv( "HOME", "/tmp" ) )
except IOError:
pass
import sys
try:
import rlcompleter, readline
except ImportError:
pass
else:
__registerExitHandler()
__enableReadlineSupport()
########NEW FILE########
__FILENAME__ = please_wait_dialog
#!/usr/bin/env python
import glib
import gtk
def destroy_window_cb(widget, event):
gtk.main_quit()
def all_done_cb():
gtk.main_quit()
def dialogue_ui():
window = gtk.Window()
window.set_title("Please wait...")
window.connect("delete-event", destroy_window_cb)
window.show()
window.set_border_width(12)
msg = "Please wait while BitBake initializes Hob"
label = gtk.Label(msg)
label.show()
window.add(label)
glib.timeout_add_seconds(10, all_done_cb)
if __name__ == "__main__":
dialogue_ui()
gtk.main()
########NEW FILE########
__FILENAME__ = rpm-createsolvedb
#!/usr/bin/env python
#
# This script generates a solution database for a directory containing rpm packages
# but tries to be efficient about this, only doing so when the packages have changed
# in some way.
#
# It is assumed something already went through and removed all the solvedb.done stamp files
# in advance.
#
# First argument - the rpm binary to use
# Subsequent arguments - paths to process solution databases for
#
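# Example invocation (illustrative only; the rpm binary and package
# directories are placeholders):
#
#   rpm-createsolvedb.py /usr/bin/rpm tmp/deploy/rpm/i586 tmp/deploy/rpm/all
#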
import sys, os
import hashlib
import stat
import subprocess
# sys.argv[0] is the script name, sys.argv[1] the rpm binary, sys.argv[2:] the paths
if len(sys.argv) < 2:
print("Error, rpm command not specified")
sys.exit(1)
if len(sys.argv) < 3:
print("Error, no paths specified")
sys.exit(1)
paths = sys.argv[2:]
for path in paths:
if os.path.exists(path + "/solvedb.done"):
continue
data = ""
manifest = []
for root, dirs, files in os.walk(path):
for file in files:
f = os.path.join(root, file)
if f.startswith(path + "/" + "solvedb"):
continue
data = data + str(os.stat(f)[stat.ST_MTIME])
manifest.append(f)
checksum = hashlib.md5(data).hexdigest()
if os.path.exists(path + "/solvedb.checksum") and open(path + "/solvedb.checksum", "r").read() == checksum:
open(path + "/solvedb.done", "w")
continue
if os.path.exists(path + "/solvedb"):
subprocess.call("rm -rf %s" % (path + "/solvedb"), shell=True)
os.mkdir(path + "/solvedb")
m = open(path + "/solvedb/manifest", "w")
m.write("# Dynamically generated solve manifest\n")
for f in manifest:
m.write(f + "\n")
m.close()
cmd = sys.argv[1] + ' -i --replacepkgs --replacefiles --oldpackage -D "_dbpath ' + path + '/solvedb" --justdb \
--noaid --nodeps --noorder --noscripts --notriggers --noparentdirs --nolinktos --stats \
--ignoresize --nosignature --nodigest -D "__dbi_txn create nofsync" \
' + path + '/solvedb/manifest'
subprocess.call(cmd, shell=True)
open(path + "/solvedb.checksum", "w").write(checksum)
open(path + "/solvedb.done", "w")
########NEW FILE########
__FILENAME__ = bbvars
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010
import sys
import getopt
import os
import os.path
import re
def usage():
print 'Usage: %s -d FILENAME [-d FILENAME]* -m METADIR [-m METADIR]*' % os.path.basename(sys.argv[0])
print ' -d FILENAME documentation file to search'
print ' -h, --help display this help and exit'
print ' -m METADIR meta directory to search for recipes'
print ' -t FILENAME documentation config file (for doc tags)'
print ' -T Only display variables with doc tags (requires -t)'
def recipe_bbvars(recipe):
''' Return a unique set of every bbvar encountered in the recipe '''
prog = re.compile("[A-Z_]+")
vset = set()
try:
r = open(recipe)
except IOError as (errno, strerror):
print 'WARNING: Failed to open recipe ', recipe
print strerror
return {}
for line in r:
# Strip any comments from the line
line = line.rsplit('#')[0]
vset = vset.union(set(prog.findall(line)))
r.close()
bbvars = {}
for v in vset:
bbvars[v] = 1
return bbvars
def collect_bbvars(metadir):
''' Walk the metadir and collect the bbvars from each recipe found '''
bbvars = {}
for root,dirs,files in os.walk(metadir):
for name in files:
if name.find(".bb") >= 0:
for key in recipe_bbvars(os.path.join(root,name)).iterkeys():
if bbvars.has_key(key):
bbvars[key] = bbvars[key] + 1
else:
bbvars[key] = 1
return bbvars
def bbvar_is_documented(var, docfiles):
prog = re.compile(".*($|[^A-Z_])%s([^A-Z_]|$)" % (var))
for doc in docfiles:
try:
f = open(doc)
except IOError as (errno, strerror):
print 'WARNING: Failed to open doc ', doc
print strerror
continue
for line in f:
if prog.match(line):
return True
f.close()
return False
def bbvar_doctag(var, docconf):
prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var))
if docconf == "":
return "?"
try:
f = open(docconf)
except IOError as (errno, strerror):
return strerror
for line in f:
m = prog.search(line)
if m:
return m.group(1)
f.close()
return ""
def main():
docfiles = []
metadirs = []
bbvars = {}
undocumented = []
docconf = ""
onlydoctags = False
# Collect and validate input
try:
opts, args = getopt.getopt(sys.argv[1:], "d:hm:t:T", ["help"])
except getopt.GetoptError, err:
print '%s' % str(err)
usage()
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
elif o == '-d':
if os.path.isfile(a):
docfiles.append(a)
else:
print 'ERROR: documentation file %s is not a regular file' % (a)
sys.exit(3)
elif o == '-m':
if os.path.isdir(a):
metadirs.append(a)
else:
print 'ERROR: meta directory %s is not a directory' % (a)
sys.exit(4)
elif o == "-t":
if os.path.isfile(a):
docconf = a
elif o == "-T":
onlydoctags = True
else:
assert False, "unhandled option"
if len(docfiles) == 0:
print 'ERROR: no docfile specified'
usage()
sys.exit(5)
if len(metadirs) == 0:
print 'ERROR: no metadir specified'
usage()
sys.exit(6)
if onlydoctags and docconf == "":
print 'ERROR: no docconf specified'
usage()
sys.exit(7)
# Collect all the variable names from the recipes in the metadirs
for m in metadirs:
for key,cnt in collect_bbvars(m).iteritems():
if bbvars.has_key(key):
bbvars[key] = bbvars[key] + cnt
else:
bbvars[key] = cnt
# Check each var for documentation
varlen = 0
for v in bbvars.iterkeys():
if len(v) > varlen:
varlen = len(v)
if not bbvar_is_documented(v, docfiles):
undocumented.append(v)
undocumented.sort()
varlen = varlen + 1
# Report all undocumented variables
print 'Found %d undocumented bb variables (out of %d):' % (len(undocumented), len(bbvars))
header = '%s%s%s' % (str("VARIABLE").ljust(varlen), str("COUNT").ljust(6), str("DOCTAG").ljust(7))
print header
print str("").ljust(len(header), '=')
for v in undocumented:
doctag = bbvar_doctag(v, docconf)
if not onlydoctags or not doctag == "":
print '%s%s%s' % (v.ljust(varlen), str(bbvars[v]).ljust(6), doctag)
if __name__ == "__main__":
main()
########NEW FILE########
__FILENAME__ = list-packageconfig-flags
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation.
#
# Copyright (C) 2013 Wind River Systems, Inc.
#
# - list available pkgs which have PACKAGECONFIG flags
# - list available PACKAGECONFIG flags and all affected pkgs
# - list all pkgs and PACKAGECONFIG information
import sys
import getopt
import os
def search_bitbakepath():
bitbakepath = ""
# Search path to bitbake lib dir in order to load bb modules
if os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), '../../bitbake/lib/bb')):
bitbakepath = os.path.join(os.path.dirname(sys.argv[0]), '../../bitbake/lib')
bitbakepath = os.path.abspath(bitbakepath)
else:
# Look for bitbake/bin dir in PATH
for pth in os.environ['PATH'].split(':'):
if os.path.exists(os.path.join(pth, '../lib/bb')):
bitbakepath = os.path.abspath(os.path.join(pth, '../lib'))
break
if not bitbakepath:
sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
sys.exit(1)
return bitbakepath
# For importing the following modules
sys.path.insert(0, search_bitbakepath())
import bb.cache
import bb.cooker
import bb.providers
import bb.tinfoil
usage_body = ''' list available pkgs which have PACKAGECONFIG flags
OPTION:
-h, --help display this help and exit
-f, --flag list available PACKAGECONFIG flags and all affected pkgs
-a, --all list all pkgs and PACKAGECONFIG information
-p, --prefer list pkgs with preferred version
EXAMPLE:
list-packageconfig-flags.py
list-packageconfig-flags.py -f
list-packageconfig-flags.py -a
list-packageconfig-flags.py -p
list-packageconfig-flags.py -f -p
list-packageconfig-flags.py -a -p
'''
def usage():
print 'Usage: %s [-f|-a] [-p]' % os.path.basename(sys.argv[0])
print usage_body
def get_fnlist(bbhandler, pkg_pn, preferred):
''' Get all recipe file names '''
if preferred:
(latest_versions, preferred_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecache, pkg_pn)
fn_list = []
for pn in sorted(pkg_pn):
if preferred:
fn_list.append(preferred_versions[pn][1])
else:
fn_list.extend(pkg_pn[pn])
return fn_list
def get_recipesdata(bbhandler, preferred):
''' Get data of all available recipes which have PACKAGECONFIG flags '''
pkg_pn = bbhandler.cooker.recipecache.pkg_pn
data_dict = {}
for fn in get_fnlist(bbhandler, pkg_pn, preferred):
data = bb.cache.Cache.loadDataFull(fn, bbhandler.cooker.collection.get_file_appends(fn), bbhandler.config_data)
if data.getVarFlags("PACKAGECONFIG"):
data_dict[fn] = data
return data_dict
def collect_pkgs(data_dict):
''' Collect available pkgs which have PACKAGECONFIG flags '''
# pkg_dict = {'pkg1': ['flag1', 'flag2',...]}
pkg_dict = {}
for fn in data_dict:
pkgconfigflags = data_dict[fn].getVarFlags("PACKAGECONFIG")
pkgname = data_dict[fn].getVar("P", True)
pkg_dict[pkgname] = sorted(pkgconfigflags.keys())
return pkg_dict
def collect_flags(pkg_dict):
''' Collect available PACKAGECONFIG flags and all affected pkgs '''
# flag_dict = {'flag': ['pkg1', 'pkg2',...]}
flag_dict = {}
for pkgname, flaglist in pkg_dict.iteritems():
for flag in flaglist:
if flag == "defaultval":
continue
if flag in flag_dict:
flag_dict[flag].append(pkgname)
else:
flag_dict[flag] = [pkgname]
return flag_dict
def display_pkgs(pkg_dict):
''' Display available pkgs which have PACKAGECONFIG flags '''
pkgname_len = len("PACKAGE NAME") + 1
for pkgname in pkg_dict:
if pkgname_len < len(pkgname):
pkgname_len = len(pkgname)
pkgname_len += 1
header = '%-*s%s' % (pkgname_len, str("PACKAGE NAME"), str("PACKAGECONFIG FLAGS"))
print header
print str("").ljust(len(header), '=')
for pkgname in sorted(pkg_dict):
print('%-*s%s' % (pkgname_len, pkgname, ' '.join(pkg_dict[pkgname])))
def display_flags(flag_dict):
''' Display available PACKAGECONFIG flags and all affected pkgs '''
flag_len = len("PACKAGECONFIG FLAG") + 5
header = '%-*s%s' % (flag_len, str("PACKAGECONFIG FLAG"), str("PACKAGE NAMES"))
print header
print str("").ljust(len(header), '=')
for flag in sorted(flag_dict):
print('%-*s%s' % (flag_len, flag, ' '.join(sorted(flag_dict[flag]))))
def display_all(data_dict):
''' Display all pkgs and PACKAGECONFIG information '''
print str("").ljust(50, '=')
for fn in data_dict:
print('%s' % data_dict[fn].getVar("P", True))
print fn
packageconfig = data_dict[fn].getVar("PACKAGECONFIG", True) or ''
if packageconfig.strip() == '':
packageconfig = 'None'
print('PACKAGECONFIG %s' % packageconfig)
for flag,flag_val in data_dict[fn].getVarFlags("PACKAGECONFIG").iteritems():
if flag == "defaultval":
continue
print('PACKAGECONFIG[%s] %s' % (flag, flag_val))
print ''
def main():
listtype = 'pkgs'
preferred = False
pkg_dict = {}
flag_dict = {}
# Collect and validate input
try:
opts, args = getopt.getopt(sys.argv[1:], "hfap", ["help", "flag", "all", "prefer"])
except getopt.GetoptError, err:
print >> sys.stderr,'%s' % str(err)
usage()
sys.exit(2)
for opt, value in opts:
if opt in ('-h', '--help'):
usage()
sys.exit(0)
elif opt in ('-f', '--flag'):
listtype = 'flags'
elif opt in ('-a', '--all'):
listtype = 'all'
elif opt in ('-p', '--prefer'):
preferred = True
else:
assert False, "unhandled option"
bbhandler = bb.tinfoil.Tinfoil()
bbhandler.prepare()
data_dict = get_recipesdata(bbhandler, preferred)
if listtype == 'flags':
pkg_dict = collect_pkgs(data_dict)
flag_dict = collect_flags(pkg_dict)
display_flags(flag_dict)
elif listtype == 'pkgs':
pkg_dict = collect_pkgs(data_dict)
display_pkgs(pkg_dict)
elif listtype == 'all':
display_all(data_dict)
if __name__ == "__main__":
main()
########NEW FILE########
__FILENAME__ = generate-manifest-2.7
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
#
# June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com>
# * Updated to no longer generate special -dbg package, instead use the
# single system -dbg
# * Update version with ".1" to indicate this change
import os
import sys
import time
VERSION = "2.7.2"
__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20110222.2"
class MakefileMaker:
def __init__( self, outfile ):
"""initialize"""
self.packages = {}
self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
self.output = outfile
self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )
#
# helper functions
#
def out( self, data ):
"""print a line to the output file"""
self.output.write( "%s\n" % data )
def setPrefix( self, targetPrefix ):
"""set a file prefix for addPackage files"""
self.targetPrefix = targetPrefix
def doProlog( self ):
self.out( """ """ )
self.out( "" )
def addPackage( self, name, description, dependencies, filenames ):
"""add a package to the Makefile"""
if type( filenames ) == type( "" ):
filenames = filenames.split()
fullFilenames = []
for filename in filenames:
if filename[0] != "$":
fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
else:
fullFilenames.append( filename )
self.packages[name] = description, dependencies, fullFilenames
def doBody( self ):
"""generate body of Makefile"""
global VERSION
#
# generate provides line
#
provideLine = 'PROVIDES+="'
for name in sorted(self.packages):
provideLine += "%s " % name
provideLine += '"'
self.out( provideLine )
self.out( "" )
#
# generate package line
#
packageLine = 'PACKAGES="${PN}-dbg '
for name in sorted(self.packages):
if name.startswith("${PN}-distutils"):
if name == "${PN}-distutils":
packageLine += "%s-staticdev %s " % (name, name)
elif name != '${PN}-dbg':
packageLine += "%s " % name
packageLine += '${PN}-modules"'
self.out( packageLine )
self.out( "" )
#
# generate package variables
#
for name, data in sorted(self.packages.iteritems()):
desc, deps, files = data
#
# write out the description, revision and dependencies
#
self.out( 'SUMMARY_%s="%s"' % ( name, desc ) )
self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
line = 'FILES_%s="' % name
#
# check which directories to make in the temporary directory
#
dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
for target in files:
dirset[os.path.dirname( target )] = True
#
# generate which files to copy for the target (-dfR because whole directories are also allowed)
#
for target in files:
line += "%s " % target
line += '"'
self.out( line )
self.out( "" )
self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
line = 'RDEPENDS_${PN}-modules="'
for name, data in sorted(self.packages.iteritems()):
if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
line += "%s " % name
self.out( "%s \"" % line )
self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )
def doEpilog( self ):
self.out( """""" )
self.out( "" )
def make( self ):
self.doProlog()
self.doBody()
self.doEpilog()
if __name__ == "__main__":
if len( sys.argv ) > 1:
try:
os.unlink(sys.argv[1])
except Exception:
sys.exc_clear()
outfile = file( sys.argv[1], "w" )
else:
outfile = sys.stdout
m = MakefileMaker( outfile )
# Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
# Parameters: name, description, dependencies, filenames
#
m.addPackage( "${PN}-core", "Python interpreter and core modules", "${PN}-lang ${PN}-re",
"__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
"genericpath.* getopt.* linecache.* new.* " +
"os.* posixpath.* struct.* " +
"warnings.* site.* stat.* " +
"UserDict.* UserList.* UserString.* " +
"lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
"lib-dynload/xreadlines.so types.* platform.* ${bindir}/python* " +
"_weakrefset.* sysconfig.* config/Makefile " +
"${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " +
"${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")
m.addPackage( "${PN}-dev", "Python development package", "${PN}-core",
"${includedir} " +
"${libdir}/lib*${SOLIBSDEV} " +
"${libdir}/*.la " +
"${libdir}/*.a " +
"${libdir}/*.o " +
"${libdir}/pkgconfig " +
"${base_libdir}/*.a " +
"${base_libdir}/*.o " +
"${datadir}/aclocal " +
"${datadir}/pkgconfig " )
m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core",
"${bindir}/2to3 lib2to3" ) # package
m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
"${bindir}/idle idlelib" ) # package
m.addPackage( "${PN}-pydoc", "Python interactive help support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
"${bindir}/pydoc pydoc.* pydoc_data" )
m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
"${bindir}/smtpd.* smtpd.*" )
m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
"wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so audiodev.* sunaudio.* sunau.* toaiff.*" )
m.addPackage( "${PN}-bsddb", "Python bindings for the Berkeley Database", "${PN}-core",
"bsddb lib-dynload/_bsddb.so" ) # package
m.addPackage( "${PN}-codecs", "Python codecs, encodings & i18n support", "${PN}-core ${PN}-lang",
"codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/_codecs* lib-dynload/_multibytecodec.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
m.addPackage( "${PN}-compile", "Python bytecode compilation support", "${PN}-core",
"py_compile.* compileall.*" )
m.addPackage( "${PN}-compiler", "Python compiler support", "${PN}-core",
"compiler" ) # package
m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-zlib",
"gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )
m.addPackage( "${PN}-crypt", "Python basic cryptographic and hashing support", "${PN}-core",
"hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )
m.addPackage( "${PN}-textutils", "Python option parsing, text wrapping and CSV support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
"lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
m.addPackage( "${PN}-curses", "Python curses support", "${PN}-core",
"curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module
m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core",
"ctypes lib-dynload/_ctypes.so lib-dynload/_ctypes_test.so" ) # directory + low level module
m.addPackage( "${PN}-datetime", "Python calendar and time support", "${PN}-core ${PN}-codecs",
"_strptime.* calendar.* lib-dynload/datetime.so" )
m.addPackage( "${PN}-db", "Python file-based database support", "${PN}-core",
"anydbm.* dumbdbm.* whichdb.* " )
m.addPackage( "${PN}-debugger", "Python debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
"bdb.* pdb.*" )
m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects", "${PN}-lang ${PN}-re",
"difflib.*" )
m.addPackage( "${PN}-distutils-staticdev", "Python distribution utilities (static libraries)", "${PN}-distutils",
"config/lib*.a" ) # package
m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core",
"config distutils" ) # package
m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
"doctest.*" )
# FIXME consider adding to some higher level package
m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core",
"lib-dynload/_elementtree.so" )
m.addPackage( "${PN}-email", "Python email support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
"imaplib.* email" ) # package
m.addPackage( "${PN}-fcntl", "Python's fcntl interface", "${PN}-core",
"lib-dynload/fcntl.so" )
m.addPackage( "${PN}-hotshot", "Python hotshot performance profiler", "${PN}-core",
"hotshot lib-dynload/_hotshot.so" )
m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core",
"formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core",
"lib-dynload/gdbm.so" )
m.addPackage( "${PN}-image", "Python graphical image handling", "${PN}-core",
"colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math ${PN}-textutils",
"lib-dynload/_socket.so lib-dynload/_io.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
"pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )
m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re",
"json lib-dynload/_json.so" ) # package
m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core",
"lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
"lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
"atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
"tokenize.* traceback.* weakref.*" )
m.addPackage( "${PN}-logging", "Python logging support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
"logging" ) # package
m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
"mailbox.*" )
m.addPackage( "${PN}-math", "Python math support", "${PN}-core ${PN}-crypt",
"lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
m.addPackage( "${PN}-mime", "Python MIME handling APIs", "${PN}-core ${PN}-io",
"mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" )
m.addPackage( "${PN}-mmap", "Python memory-mapped file support", "${PN}-core ${PN}-io",
"lib-dynload/mmap.so " )
m.addPackage( "${PN}-multiprocessing", "Python multiprocessing support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading ${PN}-ctypes ${PN}-mmap",
"lib-dynload/_multiprocessing.so multiprocessing" ) # package
m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
"*Cookie*.* " +
"base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
m.addPackage( "${PN}-netserver", "Python Internet Protocol servers", "${PN}-core ${PN}-netclient",
"cgi.* *HTTPServer.* SocketServer.*" )
m.addPackage( "${PN}-numbers", "Python number APIs", "${PN}-core ${PN}-lang ${PN}-re",
"decimal.* numbers.*" )
m.addPackage( "${PN}-pickle", "Python serialisation/persistence support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
"pickle.* shelve.* lib-dynload/cPickle.so pickletools.*" )
m.addPackage( "${PN}-pkgutil", "Python package extension utility support", "${PN}-core",
"pkgutil.*")
m.addPackage( "${PN}-pprint", "Python pretty-print support", "${PN}-core ${PN}-io",
"pprint.*" )
m.addPackage( "${PN}-profile", "Python basic performance profiling support", "${PN}-core ${PN}-textutils",
"profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )
m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
"re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
m.addPackage( "${PN}-readline", "Python readline support", "${PN}-core",
"lib-dynload/readline.so rlcompleter.*" )
m.addPackage( "${PN}-resource", "Python resource control interface", "${PN}-core",
"lib-dynload/resource.so" )
m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re",
"cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
"robotparser.*")
m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
"subprocess.*" )
m.addPackage( "${PN}-sqlite3", "Python Sqlite3 database support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib",
"lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 database support tests", "${PN}-core ${PN}-sqlite3",
"sqlite3/test" )
m.addPackage( "${PN}-stringold", "Python string APIs [deprecated]", "${PN}-core ${PN}-re",
"lib-dynload/strop.so string.* stringold.*" )
m.addPackage( "${PN}-syslog", "Python syslog interface", "${PN}-core",
"lib-dynload/syslog.so" )
m.addPackage( "${PN}-terminal", "Python terminal controlling support", "${PN}-core ${PN}-io",
"pty.* tty.*" )
m.addPackage( "${PN}-tests", "Python tests", "${PN}-core",
"test" ) # package
m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang",
"_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core",
"lib-dynload/_tkinter.so lib-tk" ) # package
m.addPackage( "${PN}-unittest", "Python unit testing framework", "${PN}-core ${PN}-stringold ${PN}-lang ${PN}-io ${PN}-difflib ${PN}-pprint ${PN}-shell",
"unittest/" )
m.addPackage( "${PN}-unixadmin", "Python Unix administration support", "${PN}-core",
"lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
m.addPackage( "${PN}-xml", "Python basic XML support", "${PN}-core ${PN}-elementtree ${PN}-re",
"lib-dynload/pyexpat.so xml xmllib.*" ) # package
m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
"xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.*" )
m.addPackage( "${PN}-zlib", "Python zlib compression support", "${PN}-core",
"lib-dynload/zlib.so" )
m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
"mailbox.*" )
m.make()
########NEW FILE########
__FILENAME__ = generate-manifest-3.3
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
#
# June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com>
# * Updated to no longer generate special -dbg package, instead use the
# single system -dbg
# * Update version with ".1" to indicate this change
#
# 2014 Khem Raj <raj.khem@gmail.com>
# Added python3 support
#
import os
import sys
import time
VERSION = "3.3.3"
__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20140131"
class MakefileMaker:
def __init__( self, outfile ):
"""initialize"""
self.packages = {}
self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
self.output = outfile
self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )
#
# helper functions
#
def out( self, data ):
"""print a line to the output file"""
self.output.write( "%s\n" % data )
def setPrefix( self, targetPrefix ):
"""set a file prefix for addPackage files"""
self.targetPrefix = targetPrefix
def doProlog( self ):
self.out( """ """ )
self.out( "" )
def addPackage( self, name, description, dependencies, filenames ):
"""add a package to the Makefile"""
if type( filenames ) == type( "" ):
filenames = filenames.split()
fullFilenames = []
for filename in filenames:
if filename[0] != "$":
fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
else:
fullFilenames.append( filename )
self.packages[name] = description, dependencies, fullFilenames
def doBody( self ):
"""generate body of Makefile"""
global VERSION
#
# generate provides line
#
provideLine = 'PROVIDES+="'
for name in sorted(self.packages):
provideLine += "%s " % name
provideLine += '"'
self.out( provideLine )
self.out( "" )
#
# generate package line
#
packageLine = 'PACKAGES="${PN}-dbg '
for name in sorted(self.packages):
if name.startswith("${PN}-distutils"):
if name == "${PN}-distutils":
packageLine += "%s-staticdev %s " % (name, name)
elif name != '${PN}-dbg':
packageLine += "%s " % name
packageLine += '${PN}-modules"'
self.out( packageLine )
self.out( "" )
#
# generate package variables
#
for name, data in sorted(self.packages.iteritems()):
desc, deps, files = data
#
# write out the description, revision and dependencies
#
self.out( 'SUMMARY_%s="%s"' % ( name, desc ) )
self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
line = 'FILES_%s="' % name
#
# check which directories to make in the temporary directory
#
dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
for target in files:
dirset[os.path.dirname( target )] = True
#
# generate which files to copy for the target (-dfR because whole directories are also allowed)
#
for target in files:
line += "%s " % target
line += '"'
self.out( line )
self.out( "" )
self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
line = 'RDEPENDS_${PN}-modules="'
for name, data in sorted(self.packages.iteritems()):
if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
line += "%s " % name
self.out( "%s \"" % line )
self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )
def doEpilog( self ):
self.out( """""" )
self.out( "" )
def make( self ):
self.doProlog()
self.doBody()
self.doEpilog()
if __name__ == "__main__":
if len( sys.argv ) > 1:
try:
os.unlink(sys.argv[1])
except Exception:
sys.exc_clear()
outfile = file( sys.argv[1], "w" )
else:
outfile = sys.stdout
m = MakefileMaker( outfile )
# Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
# Parameters: name, description, dependencies, filenames
#
m.addPackage( "${PN}-core", "Python interpreter and core modules", "${PN}-lang ${PN}-re",
"__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
"genericpath.* getopt.* linecache.* new.* " +
"os.* posixpath.* struct.* " +
"warnings.* site.* stat.* " +
"UserDict.* UserList.* UserString.* " +
"lib-dynload/binascii.*.so lib-dynload/_struct.*.so lib-dynload/time.*.so " +
"lib-dynload/xreadlines.*.so types.* platform.* ${bindir}/python* " +
"_weakrefset.* sysconfig.* config/Makefile " +
"${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " +
"${libdir}/python${PYTHON_MAJMIN}/collections " +
"${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")
m.addPackage( "${PN}-dev", "Python development package", "${PN}-core",
"${includedir} " +
"${libdir}/lib*${SOLIBSDEV} " +
"${libdir}/*.la " +
"${libdir}/*.a " +
"${libdir}/*.o " +
"${libdir}/pkgconfig " +
"${base_libdir}/*.a " +
"${base_libdir}/*.o " +
"${datadir}/aclocal " +
"${datadir}/pkgconfig " )
m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core",
"${bindir}/2to3 lib2to3" ) # package
m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
"${bindir}/idle idlelib" ) # package
m.addPackage( "${PN}-pydoc", "Python interactive help support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
"${bindir}/pydoc pydoc.* pydoc_data" )
m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
"${bindir}/smtpd.* smtpd.*" )
m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
"wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.*.so lib-dynload/audioop.*.so audiodev.* sunaudio.* sunau.* toaiff.*" )
m.addPackage( "${PN}-codecs", "Python codecs, encodings & i18n support", "${PN}-core ${PN}-lang",
"codecs.* encodings gettext.* locale.* lib-dynload/_locale.*.so lib-dynload/_codecs* lib-dynload/_multibytecodec.*.so lib-dynload/unicodedata.*.so stringprep.* xdrlib.*" )
m.addPackage( "${PN}-compile", "Python bytecode compilation support", "${PN}-core",
"py_compile.* compileall.*" )
m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-codecs",
"gzip.* zipfile.* tarfile.* lib-dynload/bz2.*.so" )
m.addPackage( "${PN}-crypt", "Python basic cryptographic and hashing support", "${PN}-core",
"hashlib.* md5.* sha.* lib-dynload/crypt.*.so lib-dynload/_hashlib.*.so lib-dynload/_sha256.*.so lib-dynload/_sha512.*.so" )
m.addPackage( "${PN}-textutils", "Python option parsing, text wrapping and CSV support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
"lib-dynload/_csv.*.so csv.* optparse.* textwrap.*" )
m.addPackage( "${PN}-curses", "Python curses support", "${PN}-core",
"curses lib-dynload/_curses.*.so lib-dynload/_curses_panel.*.so" ) # directory + low level module
m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core",
"ctypes lib-dynload/_ctypes.*.so lib-dynload/_ctypes_test.*.so" ) # directory + low level module
m.addPackage( "${PN}-datetime", "Python calendar and time support", "${PN}-core ${PN}-codecs",
"_strptime.* calendar.* lib-dynload/datetime.*.so" )
m.addPackage( "${PN}-db", "Python file-based database support", "${PN}-core",
"anydbm.* dumbdbm.* whichdb.* dbm lib-dynload/_dbm.*.so" )
m.addPackage( "${PN}-debugger", "Python debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
"bdb.* pdb.*" )
m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects", "${PN}-lang ${PN}-re",
"difflib.*" )
m.addPackage( "${PN}-distutils-staticdev", "Python distribution utilities (static libraries)", "${PN}-distutils",
"config/lib*.a" ) # package
m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core",
"config distutils" ) # package
m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
"doctest.*" )
# FIXME consider adding to some higher level package
m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core",
"lib-dynload/_elementtree.*.so" )
m.addPackage( "${PN}-email", "Python email support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
"imaplib.* email" ) # package
m.addPackage( "${PN}-fcntl", "Python's fcntl interface", "${PN}-core",
"lib-dynload/fcntl.*.so" )
m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core",
"formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core",
"lib-dynload/_gdbm.*.so" )
m.addPackage( "${PN}-image", "Python graphical image handling", "${PN}-core",
"colorsys.* imghdr.* lib-dynload/imageop.*.so lib-dynload/rgbimg.*.so" )
m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math",
"lib-dynload/_socket.*.so lib-dynload/_io.*.so lib-dynload/_ssl.*.so lib-dynload/select.*.so lib-dynload/termios.*.so lib-dynload/cStringIO.*.so " +
"pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )
m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re",
"json lib-dynload/_json.*.so" ) # package
m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core",
"lib-dynload/_bisect.*.so lib-dynload/_collections.*.so lib-dynload/_heapq.*.so lib-dynload/_weakref.*.so lib-dynload/_functools.*.so " +
"lib-dynload/array.*.so lib-dynload/itertools.*.so lib-dynload/operator.*.so lib-dynload/parser.*.so " +
"atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
"tokenize.* traceback.* weakref.*" )
m.addPackage( "${PN}-logging", "Python logging support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
"logging" ) # package
m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
"mailbox.*" )
m.addPackage( "${PN}-math", "Python math support", "${PN}-core",
"lib-dynload/cmath.*.so lib-dynload/math.*.so lib-dynload/_random.*.so random.* sets.*" )
m.addPackage( "${PN}-mime", "Python MIME handling APIs", "${PN}-core ${PN}-io",
"mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" )
m.addPackage( "${PN}-mmap", "Python memory-mapped file support", "${PN}-core ${PN}-io",
"lib-dynload/mmap.*.so " )
m.addPackage( "${PN}-multiprocessing", "Python multiprocessing support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading ${PN}-ctypes ${PN}-mmap",
"lib-dynload/_multiprocessing.*.so multiprocessing" ) # package
m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
"*Cookie*.* " +
"base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
m.addPackage( "${PN}-netserver", "Python Internet Protocol servers", "${PN}-core ${PN}-netclient",
"cgi.* *HTTPServer.* SocketServer.*" )
m.addPackage( "${PN}-numbers", "Python number APIs", "${PN}-core ${PN}-lang ${PN}-re",
"decimal.* numbers.*" )
m.addPackage( "${PN}-pickle", "Python serialisation/persistence support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
"pickle.* shelve.* lib-dynload/cPickle.*.so pickletools.*" )
m.addPackage( "${PN}-pkgutil", "Python package extension utility support", "${PN}-core",
"pkgutil.*")
m.addPackage( "${PN}-pprint", "Python pretty-print support", "${PN}-core ${PN}-io",
"pprint.*" )
m.addPackage( "${PN}-profile", "Python basic performance profiling support", "${PN}-core ${PN}-textutils",
"profile.* pstats.* cProfile.* lib-dynload/_lsprof.*.so" )
m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
"re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
m.addPackage( "${PN}-readline", "Python readline support", "${PN}-core",
"lib-dynload/readline.*.so rlcompleter.*" )
m.addPackage( "${PN}-resource", "Python resource control interface", "${PN}-core",
"lib-dynload/resource.*.so" )
m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re",
"cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
"urllib/robotparser.*")
m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
"subprocess.*" )
m.addPackage( "${PN}-sqlite3", "Python Sqlite3 database support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading",
"lib-dynload/_sqlite3.*.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 database support tests", "${PN}-core ${PN}-sqlite3",
"sqlite3/test" )
m.addPackage( "${PN}-stringold", "Python string APIs [deprecated]", "${PN}-core ${PN}-re",
"lib-dynload/strop.*.so string.* stringold.*" )
m.addPackage( "${PN}-syslog", "Python syslog interface", "${PN}-core",
"lib-dynload/syslog.*.so" )
m.addPackage( "${PN}-terminal", "Python terminal controlling support", "${PN}-core ${PN}-io",
"pty.* tty.*" )
m.addPackage( "${PN}-tests", "Python tests", "${PN}-core",
"test" ) # package
m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang",
"_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core",
"lib-dynload/_tkinter.*.so lib-tk tkinter" ) # package
m.addPackage( "${PN}-unittest", "Python unit testing framework", "${PN}-core ${PN}-stringold ${PN}-lang ${PN}-io ${PN}-difflib ${PN}-pprint ${PN}-shell",
"unittest/" )
m.addPackage( "${PN}-unixadmin", "Python Unix administration support", "${PN}-core",
"lib-dynload/nis.*.so lib-dynload/grp.*.so lib-dynload/pwd.*.so getpass.*" )
m.addPackage( "${PN}-xml", "Python basic XML support", "${PN}-core ${PN}-elementtree ${PN}-re",
"lib-dynload/pyexpat.*.so xml xmllib.*" ) # package
m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
"xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.* xmlrpc" )
m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
"mailbox.*" )
m.make()
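# The addPackage() calls above are consumed by m.make(); a minimal sketch of
# the per-package manifest variables each call presumably expands to (the
# exact emitted format is an assumption, not taken from this file):
#
#   m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math", "..." )
#
# roughly corresponds to entries of the form:
#
#   SUMMARY_${PN}-io = "Python low-level I/O"
#   RDEPENDS_${PN}-io = "${PN}-core ${PN}-math"
#   FILES_${PN}-io = "<the listed file globs, prefixed with the python libdir>"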
########NEW FILE########
__FILENAME__ = jhbuild2oe
#!/usr/bin/env python
# Available modulesets:
#
# bootstrap.modules
# freedesktop.modules
# gcj.modules
# gnome-2.10.modules
# gnome-2.12.modules
# gnome-2.14.modules
# gnome-2.16.modules
# gnutls.modules
# gtk28.modules
# gtk.modules
# xorg-7.0.modules
# xorg.modules
moduleset = 'xorg.modules'
import cElementTree as ElementTree
# import lxml.etree as ElementTree
import re, os, bb, bb.data
class Handlers(object):
"""
Class to act as a store for handlers of jhbuild xml elements, and as a
dispatcher of parsed Elements to those handlers.
These handlers exist to take an xml element from the jhbuild files and
either produce bitbake metadata in self.packages, or produce data which
will be used by other element handlers to do so.
Handlers(filename) -> new object to parse and process jhbuild file of
name 'filename'.
"""
cvsrootpat = re.compile(r'''
\s* # Skip leading whitespace
:(?P<scheme>[^:]+): # scheme (i.e. pserver, ext)
((?P<user>\S+?)@)? # username
(?P<host>\S+?): # non-greedy match of the remote host
(?P<path>\S+) # remote path
''', re.VERBOSE)
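    # Worked example for the pattern above (hypothetical input): matching
    #   ":pserver:anoncvs@cvs.freedesktop.org:/cvs/fontconfig"
    # yields scheme="pserver", user="anoncvs", host="cvs.freedesktop.org",
    # path="/cvs/fontconfig", which the cvsroot() handler below reassembles
    # into
    #   "cvs://anoncvs@cvs.freedesktop.org/cvs/fontconfig"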
def __init__(self, msfile):
self.msfile = msfile
self.msbasename = os.path.basename(msfile)
self.msdirname = os.path.dirname(msfile)
self.handled = {}
self.cvsroots = {}
self.repositories = {}
self.packages = []
def handle(self, element, parent):
        """
        XML Element dispatch function. Can be called both from outside the
        Handlers object to initiate handling, and from within individual XML
        element handlers to ensure that dependent elements have been handled.
        Does not handle a given XML Element more than once, as it retains
        information about the handling state of the Elements it encounters.
        """
        import sys
        try:
            # Return the cached state if this element was already handled,
            # so that no element is processed more than once.
            return self.handled[element]
        except KeyError:
            pass
        # Look the handler up before calling it, so that a KeyError raised
        # from inside a handler is not misreported as an unhandled element.
        handler = self.__class__.__dict__.get(element.tag)
        if handler is None:
            self.handled[element] = False
            sys.__stderr__.write('Unhandled element: %s\n' % element.tag)
        else:
            try:
                handler(self, element, parent)
                self.handled[element] = True
            except Exception:
                sys.__stderr__.write('Error handling %s: %s:\n    %s\n' % (element.tag, sys.exc_type, sys.exc_value))
                self.handled[element] = False
print('handle(%s, %s) -> %s' % (element, parent, self.handled[element]))
return self.handled[element]
def cvsroot(self, element, parent):
# Rip apart the cvsroot style location to build a cvs:// url for
# bitbake's usage in the cvsmodule handler.
# root=":pserver:anoncvs@cvs.freedesktop.org:/cvs/fontconfig"
print("cvsroot(%s, %s)" % (element, parent))
root = element.attrib.get('root')
rootmatch = re.match(Handlers.cvsrootpat, root)
name = element.attrib.get('name')
user = rootmatch.group('user') or ''
if user != '':
pw = element.attrib.get('password') or ''
if pw != '':
pw = ':' + pw + '@'
else:
user = user + '@'
print('user: %s' % user)
print('pw: %s' % pw)
host = rootmatch.group('host')
print('host: %s' % host)
path = rootmatch.group('path') or '/'
print('path: %s' % path)
root = "cvs://%s%s%s%s" % (user, pw, host, path)
print('root: %s' % root)
self.cvsroots[name] = root
def cvsmodule(self, element, parent):
rootlist = [root for root in list(parent) if root.attrib.get('name') == element.attrib.get('cvsroot')]
if len(rootlist) < 1:
raise Exception("Error: cvsmodule '%s' requires cvsroot '%s'." % (element.attrib.get('module'), element.attrib.get('cvsroot')))
cvsroot = rootlist[0]
def include(self, element, parent):
href = element.attrib.get('href')
fullhref = os.path.join(self.msdirname, href)
tree = ElementTree.ElementTree(file=fullhref)
elem = tree.getroot()
# Append the children of the newly included root element to the parent
# element, and manually handle() them, as the currently running
# iteration isn't going to hit them.
for child in elem:
self.handle(child, elem)
parent.append(elem)
def repository(self, element, parent):
# TODO:
# Convert the URL in the href attribute, if necessary, to the format
# which bitbake expects to see in SRC_URI.
name = element.attrib.get('name')
self.repositories[name] = element.attrib.get('href')
def moduleset(self, element, parent):
for child in element:
self.handle(child, element)
def packagename(self, name):
# mangle name into an appropriate bitbake package name
return name.replace('/', '-')
def metamodule(self, element, parent):
# grab the deps
deps = None
for child in element:
if child.tag == 'dependencies':
deps = [self.packagename(dep.attrib.get('package')) for dep in child if dep.tag == "dep"]
# create the package
d = bb.data.init()
pn = self.packagename(element.attrib.get('id'))
d.setVar('PN', pn)
        if deps is not None:
            bb.data.setVar('DEPENDS', ' '.join(deps), d)
d.setVar('_handler', 'metamodule')
self.packages.append(d)
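    # Sketch of what metamodule() produces (hypothetical ids): an element
    #   <metamodule id="meta-xorg">
    #     <dependencies><dep package="xserver"/></dependencies>
    #   </metamodule>
    # yields a datastore equivalent to PN = "meta-xorg", DEPENDS = "xserver",
    # which Emitter.write() below serialises into meta-xorg.bb.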
def autotools(self, element, parent):
deps = None
branch = None
for child in element:
if child.tag == 'dependencies':
deps = [self.packagename(dep.attrib.get('package')) for dep in child if dep.tag == "dep"]
elif child.tag == 'branch':
branch = child
# create the package
d = bb.data.init()
id = element.attrib.get('id')
if id is None:
raise Exception('Error: autotools element has no id attribute.')
pn = self.packagename(id)
d.setVar('PN', pn)
if deps is not None:
bb.data.setVar('DEPENDS', ' '.join(deps), d)
if branch is not None:
# <branch repo="git.freedesktop.org" module="xorg/xserver"/>
repo = os.path.join(self.repositories[branch.attrib.get('repo')], branch.attrib.get('module'))
d.setVar('SRC_URI', repo)
checkoutdir = branch.attrib.get('checkoutdir')
if checkoutdir is not None:
bb.data.setVar('S', os.path.join('${WORKDIR}', checkoutdir), d)
# build class
d.setVar('INHERITS', 'autotools')
d.setVarFlag('INHERITS', 'operator', '+=')
d.setVar('_handler', 'autotools')
self.packages.append(d)
class Emitter(object):
"""
Class which contains a single method for the emission of a bitbake
package from the bitbake data produced by a Handlers object.
"""
def __init__(self, filefunc = None, basedir = None):
def _defaultfilefunc(package):
# return a relative path to the bitbake .bb which will be written
return package.getVar('PN', 1) + '.bb'
self.filefunc = filefunc or _defaultfilefunc
self.basedir = basedir or os.path.abspath(os.curdir)
def write(self, package, template = None):
# 1) Assemble new file contents in ram, either new from bitbake
# metadata, or a combination of the template and that metadata.
# 2) Open the path returned by the filefunc + the basedir for writing.
# 3) Write the new bitbake data file.
fdata = ''
if template:
f = file(template, 'r')
fdata = f.read()
f.close()
for key in bb.data.keys(package):
fdata = fdata.replace('@@'+key+'@@', package.getVar(key))
else:
for key in bb.data.keys(package):
if key == '_handler':
continue
elif key == 'INHERITS':
fdata += 'inherit %s\n' % package.getVar('INHERITS')
else:
oper = package.getVarFlag(key, 'operator') or '='
fdata += '%s %s "%s"\n' % (key, oper, package.getVar(key))
if not os.path.exists(os.path.join(self.basedir, os.path.dirname(self.filefunc(package)))):
os.makedirs(os.path.join(self.basedir, os.path.dirname(self.filefunc(package))))
out = file(os.path.join(self.basedir, self.filefunc(package)), 'w')
out.write(fdata)
out.close()
def _test():
msfile = os.path.join(os.path.abspath(os.curdir), 'modulesets', moduleset)
tree = ElementTree.ElementTree(file=msfile)
elem = tree.getroot()
handlers = Handlers(msfile)
handlers.handle(elem, None)
def filefunc(package):
# return a relative path to the bitbake .bb which will be written
src_uri = package.getVar('SRC_URI', 1)
filename = package.getVar('PN', 1) + '.bb'
if not src_uri:
return filename
else:
substr = src_uri[src_uri.find('xorg/'):]
subdirlist = substr.split('/')[:2]
subdir = '-'.join(subdirlist)
return os.path.join(subdir, filename)
emitter = Emitter(filefunc)
for package in handlers.packages:
template = emitter.filefunc(package) + '.in'
if os.path.exists(template):
print("%s exists, emitting based on template" % template)
emitter.write(package, template)
else:
print("%s does not exist, emitting non-templated" % template)
emitter.write(package)
if __name__ == "__main__":
_test()
########NEW FILE########
__FILENAME__ = engine
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This module implements the image creation engine used by 'wic' to
# create images. The engine parses through the OpenEmbedded kickstart
# (wks) file specified and generates images that can then be directly
# written onto media.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import os
import sys
from abc import ABCMeta, abstractmethod
import shlex
import json
import subprocess
import shutil
import errno
from mic import msger, creator
from mic.utils import cmdln, misc, errors
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.__version__ import VERSION
from mic.utils.oe.misc import *
def verify_build_env():
"""
Verify that the build environment is sane.
Returns True if it is, false otherwise
"""
try:
builddir = os.environ["BUILDDIR"]
except KeyError:
print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)"
sys.exit(1)
return True
def find_artifacts(image_name):
"""
Gather the build artifacts for the current image (the image_name
e.g. core-image-minimal) for the current MACHINE set in local.conf
"""
bitbake_env_lines = get_bitbake_env_lines()
rootfs_dir = kernel_dir = hdddir = staging_data_dir = native_sysroot = ""
for line in bitbake_env_lines.split('\n'):
if (get_line_val(line, "IMAGE_ROOTFS")):
rootfs_dir = get_line_val(line, "IMAGE_ROOTFS")
continue
if (get_line_val(line, "STAGING_KERNEL_DIR")):
kernel_dir = get_line_val(line, "STAGING_KERNEL_DIR")
continue
if (get_line_val(line, "HDDDIR")):
hdddir = get_line_val(line, "HDDDIR")
continue
if (get_line_val(line, "STAGING_DATADIR")):
staging_data_dir = get_line_val(line, "STAGING_DATADIR")
continue
if (get_line_val(line, "STAGING_DIR_NATIVE")):
native_sysroot = get_line_val(line, "STAGING_DIR_NATIVE")
continue
return (rootfs_dir, kernel_dir, hdddir, staging_data_dir, native_sysroot)
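# find_artifacts() assumes 'bitbake -e' output lines of the form VAR="value"
# (an assumption inferred from how get_line_val() is used here); e.g. a line
# such as the following (hypothetical path) would populate rootfs_dir:
#   IMAGE_ROOTFS="/build/tmp/work/qemux86/core-image-minimal/1.0-r0/rootfs"
# Variables that never appear in the output are left as "".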
CANNED_IMAGE_DIR = "lib/image/canned-wks" # relative to scripts
SCRIPTS_CANNED_IMAGE_DIR = "scripts/" + CANNED_IMAGE_DIR
def build_canned_image_list(dl):
layers_path = get_bitbake_var("BBLAYERS")
canned_wks_layer_dirs = []
for layer_path in layers_path.split():
path = os.path.join(layer_path, SCRIPTS_CANNED_IMAGE_DIR)
canned_wks_layer_dirs.append(path)
path = os.path.join(dl, CANNED_IMAGE_DIR)
canned_wks_layer_dirs.append(path)
return canned_wks_layer_dirs
def find_canned_image(scripts_path, wks_file):
"""
    Find a .wks file with the given name in the canned files dir.
    Return None if not found.
    """
layers_canned_wks_dir = build_canned_image_list(scripts_path)
for canned_wks_dir in layers_canned_wks_dir:
for root, dirs, files in os.walk(canned_wks_dir):
for file in files:
if file.endswith("~") or file.endswith("#"):
continue
if file.endswith(".wks") and wks_file + ".wks" == file:
fullpath = os.path.join(canned_wks_dir, file)
return fullpath
return None
def list_canned_images(scripts_path):
"""
List the .wks files in the canned image dir, minus the extension.
"""
layers_canned_wks_dir = build_canned_image_list(scripts_path)
for canned_wks_dir in layers_canned_wks_dir:
for root, dirs, files in os.walk(canned_wks_dir):
for file in files:
if file.endswith("~") or file.endswith("#"):
continue
if file.endswith(".wks"):
fullpath = os.path.join(canned_wks_dir, file)
f = open(fullpath, "r")
lines = f.readlines()
for line in lines:
desc = ""
idx = line.find("short-description:")
if idx != -1:
desc = line[idx + len("short-description:"):].strip()
break
basename = os.path.splitext(file)[0]
print " %s\t\t%s" % (basename.ljust(30), desc)
def list_canned_image_help(scripts_path, fullpath):
"""
List the help and params in the specified canned image.
"""
f = open(fullpath, "r")
lines = f.readlines()
found = False
for line in lines:
if not found:
idx = line.find("long-description:")
if idx != -1:
print
print line[idx + len("long-description:"):].strip()
found = True
continue
if not line.strip():
break
idx = line.find("#")
if idx != -1:
print line[idx + len("#:"):].rstrip()
else:
break
def wic_create(args, wks_file, rootfs_dir, bootimg_dir, kernel_dir,
native_sysroot, hdddir, staging_data_dir, scripts_path,
image_output_dir, debug, properties_file, properties=None):
"""
Create image
wks_file - user-defined OE kickstart file
rootfs_dir - absolute path to the build's /rootfs dir
bootimg_dir - absolute path to the build's boot artifacts directory
kernel_dir - absolute path to the build's kernel directory
native_sysroot - absolute path to the build's native sysroots dir
hdddir - absolute path to the build's HDDDIR dir
staging_data_dir - absolute path to the build's STAGING_DATA_DIR dir
scripts_path - absolute path to /scripts dir
image_output_dir - dirname to create for image
    properties_file - use values from this file if non-empty, i.e. no prompting
    properties - use values from this string if non-empty, i.e. no prompting
    Normally, the locations of the build artifacts are determined
by 'wic -e' from the output of the 'bitbake -e' command given an
image name e.g. 'core-image-minimal' and a given machine set in
local.conf. If that's the case, the variables get the following
values from the output of 'bitbake -e':
rootfs_dir: IMAGE_ROOTFS
kernel_dir: STAGING_KERNEL_DIR
native_sysroot: STAGING_DIR_NATIVE
hdddir: HDDDIR
staging_data_dir: STAGING_DATA_DIR
In the above case, bootimg_dir remains unset and the image
creation code determines which of the passed-in directories to
use.
    In the case where the values are passed in explicitly, i.e. 'wic -e'
    is not used but rather the individual 'wic' options are used to
    explicitly specify these values, hdddir and staging_data_dir will
    be unset, but bootimg_dir must be explicitly set to either hdddir
    or staging_data_dir, depending on the image being generated.  The
    other values (rootfs_dir, kernel_dir, and native_sysroot)
    correspond to the same values found above via 'bitbake -e'.
"""
try:
oe_builddir = os.environ["BUILDDIR"]
except KeyError:
print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)"
sys.exit(1)
direct_args = list()
direct_args.insert(0, oe_builddir)
direct_args.insert(0, image_output_dir)
direct_args.insert(0, wks_file)
direct_args.insert(0, rootfs_dir)
direct_args.insert(0, bootimg_dir)
direct_args.insert(0, kernel_dir)
direct_args.insert(0, native_sysroot)
direct_args.insert(0, hdddir)
direct_args.insert(0, staging_data_dir)
direct_args.insert(0, "direct")
if debug:
msger.set_loglevel('debug')
cr = creator.Creator()
cr.main(direct_args)
print "\nThe image(s) were created using OE kickstart file:\n %s" % wks_file
def wic_list(args, scripts_path, properties_file):
"""
Print the complete list of properties defined by the image, or the
possible values for a particular image property.
"""
if len(args) < 1:
return False
if len(args) == 1:
if args[0] == "images":
list_canned_images(scripts_path)
return True
elif args[0] == "properties":
return True
else:
return False
if len(args) == 2:
if args[0] == "properties":
wks_file = args[1]
print "print properties contained in wks file: %s" % wks_file
return True
elif args[0] == "property":
print "print property values for property: %s" % args[1]
return True
elif args[1] == "help":
wks_file = args[0]
fullpath = find_canned_image(scripts_path, wks_file)
if not fullpath:
print "No image named %s found, exiting. (Use 'wic list images' to list available images, or specify a fully-qualified OE kickstart (.wks) filename)\n" % wks_file
sys.exit(1)
list_canned_image_help(scripts_path, fullpath)
return True
else:
return False
return False
########NEW FILE########
__FILENAME__ = help
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This module implements some basic help invocation functions along
# with the bulk of the help topic text for the OE Core Image Tools.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import subprocess
import logging
def subcommand_error(args):
logging.info("invalid subcommand %s" % args[0])
def display_help(subcommand, subcommands):
"""
Display help for subcommand.
"""
if subcommand not in subcommands:
return False
help = subcommands.get(subcommand, subcommand_error)[2]
pager = subprocess.Popen('less', stdin=subprocess.PIPE)
pager.communicate(help)
return True
def wic_help(args, usage_str, subcommands):
"""
Subcommand help dispatcher.
"""
if len(args) == 1 or not display_help(args[1], subcommands):
print(usage_str)
def invoke_subcommand(args, parser, main_command_usage, subcommands):
"""
Dispatch to subcommand handler borrowed from combo-layer.
Should use argparse, but has to work in 2.6.
"""
if not args:
logging.error("No subcommand specified, exiting")
parser.print_help()
elif args[0] == "help":
wic_help(args, main_command_usage, subcommands)
elif args[0] not in subcommands:
logging.error("Unsupported subcommand %s, exiting\n" % (args[0]))
parser.print_help()
else:
usage = subcommands.get(args[0], subcommand_error)[1]
subcommands.get(args[0], subcommand_error)[0](args[1:], usage)
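# A minimal sketch of the subcommands table the dispatchers above assume
# (handler names are hypothetical; the tuple layout follows the [0]/[1]/[2]
# indexing used in display_help() and invoke_subcommand()):
#
#   subcommands = {
#       "create": (do_create, wic_create_usage, wic_create_help),
#       "list":   (do_list,   wic_list_usage,   wic_list_help),
#   }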
##
# wic help and usage strings
##
wic_usage = """
Create a customized OpenEmbedded image
usage: wic [--version] [--help] COMMAND [ARGS]
Current 'wic' commands are:
create Create a new OpenEmbedded image
list List available values for options and image properties
See 'wic help COMMAND' for more information on a specific command.
"""
wic_help_usage = """
usage: wic help <subcommand>
This command displays detailed help for the specified subcommand.
"""
wic_create_usage = """
Create a new OpenEmbedded image
usage: wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
[-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
[-e | --image-name] [-r, --rootfs-dir] [-b, --bootimg-dir]
[-k, --kernel-dir] [-n, --native-sysroot] [-s, --skip-build-check]
This command creates an OpenEmbedded image based on the 'OE kickstart
commands' found in the <wks file>.
The -o option can be used to place the image in a directory with a
different name and location.
See 'wic help create' for more detailed instructions.
"""
wic_create_help = """
NAME
wic create - Create a new OpenEmbedded image
SYNOPSIS
wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
[-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
[-e | --image-name] [-r, --rootfs-dir] [-b, --bootimg-dir]
[-k, --kernel-dir] [-n, --native-sysroot] [-s, --skip-build-check]
DESCRIPTION
This command creates an OpenEmbedded image based on the 'OE
kickstart commands' found in the <wks file>.
In order to do this, wic needs to know the locations of the
various build artifacts required to build the image.
Users can explicitly specify the build artifact locations using
the -r, -b, -k, and -n options. See below for details on where
the corresponding artifacts are typically found in a normal
OpenEmbedded build.
    Alternatively, users can use the -e option to have 'wic' determine
    those locations for a given image.  If the -e option is used, the
user needs to have set the appropriate MACHINE variable in
local.conf, and have sourced the build environment.
The -e option is used to specify the name of the image to use the
artifacts from e.g. core-image-sato.
The -r option is used to specify the path to the /rootfs dir to
use as the .wks rootfs source.
The -b option is used to specify the path to the dir containing
the boot artifacts (e.g. /EFI or /syslinux dirs) to use as the
.wks bootimg source.
The -k option is used to specify the path to the dir containing
the kernel to use in the .wks bootimg.
The -n option is used to specify the path to the native sysroot
containing the tools to use to build the image.
The -s option is used to skip the build check. The build check is
a simple sanity check used to determine whether the user has
sourced the build environment so that the -e option can operate
correctly. If the user has specified the build artifact locations
explicitly, 'wic' assumes the user knows what he or she is doing
and skips the build check.
    When 'wic -e' is used, the locations of the build artifacts
    are determined by 'wic -e' from the output of the 'bitbake
-e' command given an image name e.g. 'core-image-minimal' and a
given machine set in local.conf. In that case, the image is
created as if the following 'bitbake -e' variables were used:
-r: IMAGE_ROOTFS
-k: STAGING_KERNEL_DIR
-n: STAGING_DIR_NATIVE
-b: HDDDIR and STAGING_DATA_DIR (handlers decide which to use)
If 'wic -e' is not used, the user needs to select the appropriate
value for -b (as well as -r, -k, and -n).
The -o option can be used to place the image in a directory with a
different name and location.
As an alternative to the wks file, the image-specific properties
that define the values that will be used to generate a particular
image can be specified on the command-line using the -i option and
supplying a JSON object consisting of the set of name:value pairs
needed by image creation.
The set of properties available for a given image type can be
listed using the 'wic list' command.
"""
wic_list_usage = """
List available OpenEmbedded image properties and values
usage: wic list images
wic list <image> help
wic list properties
wic list properties <wks file>
wic list property <property>
[-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
This command enumerates the set of available canned images as well as
help for those images. It also can be used to enumerate the complete
set of possible values for a specified option or property needed by
the image creation process.
The first form enumerates all the available 'canned' images.
The second form lists the detailed help information for a specific
'canned' image.
The third form enumerates all the possible values that exist and can
be specified in an OE kickstart (wks) file.
The fourth form enumerates all the possible options that exist for
the set of properties specified in a given OE kickstart (wks) file.
The final form enumerates all the possible values that exist and can
be specified for any given OE kickstart (wks) property.
See 'wic help list' for more details.
"""
wic_list_help = """
NAME
wic list - List available OpenEmbedded image properties and values
SYNOPSIS
wic list images
wic list <image> help
wic list properties
wic list properties <wks file>
wic list property <property>
[-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
DESCRIPTION
    This command enumerates the set of available canned images as well
    as help for those images.  It also can be used to enumerate the
    complete set of possible values for a specified option or property
    needed by the image creation process.
    The first form enumerates all the available 'canned' images.
    These are actually just the set of .wks files that have been moved
    into the /scripts/lib/image/canned-wks directory.
The second form lists the detailed help information for a specific
'canned' image.
    The third form enumerates all the possible values that exist and
    can be specified in an OE kickstart (wks) file.  The output of this
    can be used by the final form to print the description and
    possible values of a specific property.
The fourth form enumerates all the possible options that exist for
the set of properties specified in a given OE kickstart (wks)
file. If the -o option is specified, the list of properties, in
addition to being displayed, will be written to the specified file
as a JSON object. In this case, the object will consist of the
set of name:value pairs corresponding to the (possibly nested)
dictionary of properties defined by the input statements used by
the image. Some example output for the 'list <wks file>' command:
    $ wic list test.wks
"part" : {
"mountpoint" : "/"
"fstype" : "ext3"
}
"part" : {
"mountpoint" : "/home"
"fstype" : "ext3"
"offset" : "10000"
}
"bootloader" : {
"type" : "efi"
}
.
.
.
Each entry in the output consists of the name of the input element
e.g. "part", followed by the properties defined for that
    element enclosed in braces.  This output should provide sufficient
    information to build a complete user interface.
The final form enumerates all the possible values that exist and
can be specified for any given OE kickstart (wks) property. If
the -o option is specified, the list of values for the given
property, in addition to being displayed, will be written to the
specified file as a JSON object. In this case, the object will
consist of the set of name:value pairs corresponding to the array
of property values associated with the property.
$ wic list property part
["mountpoint", "where the partition should be mounted"]
["fstype", "filesytem type of the partition"]
["ext3"]
["ext4"]
["btrfs"]
["swap"]
["offset", "offset of the partition within the image"]
"""
########NEW FILE########
__FILENAME__ = base
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Base classes for creating commands and syntax version objects.
This module exports several important base classes:
BaseData - The base abstract class for all data objects. Data objects
are contained within a BaseHandler object.
BaseHandler - The base abstract class from which versioned kickstart
handler are derived. Subclasses of BaseHandler hold
BaseData and KickstartCommand objects.
DeprecatedCommand - An abstract subclass of KickstartCommand that should
be further subclassed by users of this module. When
a subclass is used, a warning message will be
printed.
KickstartCommand - The base abstract class for all kickstart commands.
Command objects are contained within a BaseHandler
object.
"""
import gettext
gettext.textdomain("pykickstart")
_ = lambda x: gettext.ldgettext("pykickstart", x)
import types
import warnings
from pykickstart.errors import *
from pykickstart.ko import *
from pykickstart.parser import Packages
from pykickstart.version import versionToString
###
### COMMANDS
###
class KickstartCommand(KickstartObject):
"""The base class for all kickstart commands. This is an abstract class."""
removedKeywords = []
removedAttrs = []
def __init__(self, writePriority=0, *args, **kwargs):
"""Create a new KickstartCommand instance. This method must be
provided by all subclasses, but subclasses must call
KickstartCommand.__init__ first. Instance attributes:
currentCmd -- The name of the command in the input file that
caused this handler to be run.
currentLine -- The current unprocessed line from the input file
that caused this handler to be run.
        handler -- A reference to the BaseHandler subclass this
                   command is contained within.  This is needed to
                   allow referencing of Data objects.
lineno -- The current line number in the input file.
writePriority -- An integer specifying when this command should be
printed when iterating over all commands' __str__
methods. The higher the number, the later this
command will be written. All commands with the
same priority will be written alphabetically.
"""
# We don't want people using this class by itself.
if self.__class__ is KickstartCommand:
raise TypeError, "KickstartCommand is an abstract class."
KickstartObject.__init__(self, *args, **kwargs)
self.writePriority = writePriority
# These will be set by the dispatcher.
self.currentCmd = ""
self.currentLine = ""
self.handler = None
self.lineno = 0
# If a subclass provides a removedKeywords list, remove all the
# members from the kwargs list before we start processing it. This
# ensures that subclasses don't continue to recognize arguments that
# were removed.
for arg in filter(kwargs.has_key, self.removedKeywords):
kwargs.pop(arg)
def __call__(self, *args, **kwargs):
"""Set multiple attributes on a subclass of KickstartCommand at once
via keyword arguments. Valid attributes are anything specified in
a subclass, but unknown attributes will be ignored.
"""
for (key, val) in kwargs.items():
# Ignore setting attributes that were removed in a subclass, as
# if they were unknown attributes.
if key in self.removedAttrs:
continue
if hasattr(self, key):
setattr(self, key, val)
def __str__(self):
"""Return a string formatted for output to a kickstart file. This
method must be provided by all subclasses.
"""
return KickstartObject.__str__(self)
def parse(self, args):
"""Parse the list of args and set data on the KickstartCommand object.
This method must be provided by all subclasses.
"""
raise TypeError, "parse() not implemented for KickstartCommand"
def apply(self, instroot="/"):
"""Write out the configuration related to the KickstartCommand object.
Subclasses which do not provide this method will not have their
configuration written out.
"""
return
def dataList(self):
"""For commands that can occur multiple times in a single kickstart
file (like network, part, etc.), return the list that we should
append more data objects to.
"""
return None
def deleteRemovedAttrs(self):
"""Remove all attributes from self that are given in the removedAttrs
list. This method should be called from __init__ in a subclass,
but only after the superclass's __init__ method has been called.
"""
for attr in filter(lambda k: hasattr(self, k), self.removedAttrs):
delattr(self, attr)
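    # A minimal sketch of the removedKeywords/removedAttrs mechanism
    # (FC4_Bootloader later in this bundle does exactly this): a newer
    # syntax version that drops a "--linear" option declares
    #
    #   class FC4_Bootloader(FC3_Bootloader):
    #       removedKeywords = FC3_Bootloader.removedKeywords + ["linear"]
    #       removedAttrs = FC3_Bootloader.removedAttrs + ["linear"]
    #
    # so __init__ silently drops kwargs["linear"] and deleteRemovedAttrs()
    # strips the attribute once the superclass has set it.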
# Set the contents of the opts object (an instance of optparse.Values
# returned by parse_args) as attributes on the KickstartCommand object.
# It's useful to call this from KickstartCommand subclasses after parsing
# the arguments.
def _setToSelf(self, optParser, opts):
self._setToObj(optParser, opts, self)
# Sets the contents of the opts object (an instance of optparse.Values
# returned by parse_args) as attributes on the provided object obj. It's
# useful to call this from KickstartCommand subclasses that handle lists
# of objects (like partitions, network devices, etc.) and need to populate
# a Data object.
def _setToObj(self, optParser, opts, obj):
        for key in filter(lambda k: getattr(opts, k) is not None, optParser.keys()):
setattr(obj, key, getattr(opts, key))
class DeprecatedCommand(KickstartCommand):
"""Specify that a command is deprecated and no longer has any function.
Any command that is deprecated should be subclassed from this class,
only specifying an __init__ method that calls the superclass's __init__.
This is an abstract class.
"""
def __init__(self, writePriority=None, *args, **kwargs):
# We don't want people using this class by itself.
        if self.__class__ is DeprecatedCommand:
            raise TypeError, "DeprecatedCommand is an abstract class."
# Create a new DeprecatedCommand instance.
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
def __str__(self):
"""Placeholder since DeprecatedCommands don't work anymore."""
return ""
def parse(self, args):
"""Print a warning message if the command is seen in the input file."""
mapping = {"lineno": self.lineno, "cmd": self.currentCmd}
warnings.warn(_("Ignoring deprecated command on line %(lineno)s: The %(cmd)s command has been deprecated and no longer has any effect. It may be removed from future releases, which will result in a fatal error from kickstart. Please modify your kickstart file to remove this command.") % mapping, DeprecationWarning)
###
### HANDLERS
###
class BaseHandler(KickstartObject):
"""Each version of kickstart syntax is provided by a subclass of this
class. These subclasses are what users will interact with for parsing,
extracting data, and writing out kickstart files. This is an abstract
class.
version -- The version this syntax handler supports. This is set by
a class attribute of a BaseHandler subclass and is used to
set up the command dict. It is for read-only use.
"""
version = None
def __init__(self, mapping=None, dataMapping=None, commandUpdates=None,
dataUpdates=None, *args, **kwargs):
"""Create a new BaseHandler instance. This method must be provided by
all subclasses, but subclasses must call BaseHandler.__init__ first.
mapping -- A custom map from command strings to classes,
useful when creating your own handler with
special command objects. It is otherwise unused
and rarely needed. If you give this argument,
the mapping takes the place of the default one
and so must include all commands you want
recognized.
dataMapping -- This is the same as mapping, but for data
objects. All the same comments apply.
commandUpdates -- This is similar to mapping, but does not take
the place of the defaults entirely. Instead,
this mapping is applied after the defaults and
updates it with just the commands you want to
modify.
dataUpdates -- This is the same as commandUpdates, but for
data objects.
Instance attributes:
commands -- A mapping from a string command to a KickstartCommand
subclass object that handles it. Multiple strings can
map to the same object, but only one instance of the
command object should ever exist. Most users should
never have to deal with this directly, as it is
manipulated internally and called through dispatcher.
currentLine -- The current unprocessed line from the input file
that caused this handler to be run.
packages -- An instance of pykickstart.parser.Packages which
describes the packages section of the input file.
platform -- A string describing the hardware platform, which is
needed only by system-config-kickstart.
scripts -- A list of pykickstart.parser.Script instances, which is
populated by KickstartParser.addScript and describes the
%pre/%post/%traceback script section of the input file.
"""
# We don't want people using this class by itself.
if self.__class__ is BaseHandler:
raise TypeError, "BaseHandler is an abstract class."
KickstartObject.__init__(self, *args, **kwargs)
# This isn't really a good place for these, but it's better than
# everything else I can think of.
self.scripts = []
self.packages = Packages()
self.platform = ""
# These will be set by the dispatcher.
self.commands = {}
self.currentLine = 0
# A dict keyed by an integer priority number, with each value being a
# list of KickstartCommand subclasses. This dict is maintained by
# registerCommand and used in __str__. No one else should be touching
# it.
self._writeOrder = {}
self._registerCommands(mapping, dataMapping, commandUpdates, dataUpdates)
def __str__(self):
"""Return a string formatted for output to a kickstart file."""
retval = ""
if self.platform != "":
retval += "#platform=%s\n" % self.platform
retval += "#version=%s\n" % versionToString(self.version)
lst = self._writeOrder.keys()
lst.sort()
for prio in lst:
for obj in self._writeOrder[prio]:
retval += obj.__str__()
for script in self.scripts:
retval += script.__str__()
retval += self.packages.__str__()
return retval
def _insertSorted(self, lst, obj):
length = len(lst)
i = 0
while i < length:
# If the two classes have the same name, it's because we are
# overriding an existing class with one from a later kickstart
# version, so remove the old one in favor of the new one.
if obj.__class__.__name__ > lst[i].__class__.__name__:
i += 1
elif obj.__class__.__name__ == lst[i].__class__.__name__:
lst[i] = obj
return
elif obj.__class__.__name__ < lst[i].__class__.__name__:
break
if i >= length:
lst.append(obj)
else:
lst.insert(i, obj)
def _setCommand(self, cmdObj):
# Add an attribute on this version object. We need this to provide a
# way for clients to access the command objects. We also need to strip
# off the version part from the front of the name.
if cmdObj.__class__.__name__.find("_") != -1:
name = unicode(cmdObj.__class__.__name__.split("_", 1)[1])
else:
            name = unicode(cmdObj.__class__.__name__)
setattr(self, name.lower(), cmdObj)
# Also, add the object into the _writeOrder dict in the right place.
if cmdObj.writePriority is not None:
if self._writeOrder.has_key(cmdObj.writePriority):
self._insertSorted(self._writeOrder[cmdObj.writePriority], cmdObj)
else:
self._writeOrder[cmdObj.writePriority] = [cmdObj]
def _registerCommands(self, mapping=None, dataMapping=None, commandUpdates=None,
dataUpdates=None):
if mapping == {} or mapping == None:
from pykickstart.handlers.control import commandMap
cMap = commandMap[self.version]
else:
cMap = mapping
if dataMapping == {} or dataMapping == None:
from pykickstart.handlers.control import dataMap
dMap = dataMap[self.version]
else:
dMap = dataMapping
if type(commandUpdates) == types.DictType:
cMap.update(commandUpdates)
if type(dataUpdates) == types.DictType:
dMap.update(dataUpdates)
for (cmdName, cmdClass) in cMap.iteritems():
# First make sure we haven't instantiated this command handler
# already. If we have, we just need to make another mapping to
# it in self.commands.
cmdObj = None
for (key, val) in self.commands.iteritems():
if val.__class__.__name__ == cmdClass.__name__:
cmdObj = val
break
# If we didn't find an instance in self.commands, create one now.
if cmdObj == None:
cmdObj = cmdClass()
self._setCommand(cmdObj)
# Finally, add the mapping to the commands dict.
self.commands[cmdName] = cmdObj
self.commands[cmdName].handler = self
# We also need to create attributes for the various data objects.
# No checks here because dMap is a bijection. At least, that's what
# the comment says. Hope no one screws that up.
for (dataName, dataClass) in dMap.iteritems():
setattr(self, dataName, dataClass)
def dispatcher(self, args, lineno):
"""Call the appropriate KickstartCommand handler for the current line
in the kickstart file. A handler for the current command should
be registered, though a handler of None is not an error. Returns
the data object returned by KickstartCommand.parse.
args -- A list of arguments to the current command
lineno -- The line number in the file, for error reporting
"""
cmd = args[0]
if not self.commands.has_key(cmd):
raise KickstartParseError, formatErrorMsg(lineno, msg=_("Unknown command: %s" % cmd))
elif self.commands[cmd] != None:
self.commands[cmd].currentCmd = cmd
self.commands[cmd].currentLine = self.currentLine
self.commands[cmd].lineno = lineno
# The parser returns the data object that was modified. This could
# be a BaseData subclass that should be put into a list, or it
# could be the command handler object itself.
obj = self.commands[cmd].parse(args[1:])
lst = self.commands[cmd].dataList()
if lst is not None:
lst.append(obj)
return obj
def maskAllExcept(self, lst):
"""Set all entries in the commands dict to None, except the ones in
the lst. All other commands will not be processed.
"""
self._writeOrder = {}
for (key, val) in self.commands.iteritems():
if not key in lst:
self.commands[key] = None
def hasCommand(self, cmd):
"""Return true if there is a handler for the string cmd."""
return hasattr(self, cmd)
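# A hedged sketch of how a parser drives a handler (method names are from
# this class; splitting the raw line with shlex is an assumption about the
# caller, normally pykickstart.parser.KickstartParser):
#
#   handler.currentLine = line
#   obj = handler.dispatcher(shlex.split(line), lineno)
#
# dispatcher() looks up shlex.split(line)[0] in handler.commands and returns
# the parsed KickstartCommand or BaseData object.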
###
### DATA
###
class BaseData(KickstartObject):
"""The base class for all data objects. This is an abstract class."""
removedKeywords = []
removedAttrs = []
def __init__(self, *args, **kwargs):
"""Create a new BaseData instance.
lineno -- Line number in the ks-file where this object was defined
"""
# We don't want people using this class by itself.
if self.__class__ is BaseData:
raise TypeError, "BaseData is an abstract class."
KickstartObject.__init__(self, *args, **kwargs)
self.lineno = 0
def __str__(self):
"""Return a string formatted for output to a kickstart file."""
return ""
def __call__(self, *args, **kwargs):
"""Set multiple attributes on a subclass of BaseData at once via
keyword arguments. Valid attributes are anything specified in a
subclass, but unknown attributes will be ignored.
"""
for (key, val) in kwargs.items():
# Ignore setting attributes that were removed in a subclass, as
# if they were unknown attributes.
if key in self.removedAttrs:
continue
if hasattr(self, key):
setattr(self, key, val)
def deleteRemovedAttrs(self):
"""Remove all attributes from self that are given in the removedAttrs
list. This method should be called from __init__ in a subclass,
but only after the superclass's __init__ method has been called.
"""
for attr in filter(lambda k: hasattr(self, k), self.removedAttrs):
delattr(self, attr)
########NEW FILE########
__FILENAME__ = authconfig
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
class FC3_Authconfig(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.authconfig = kwargs.get("authconfig", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.authconfig:
retval += "# System authorization information\nauth %s\n" % self.authconfig
return retval
def parse(self, args):
self.authconfig = self.currentLine[len(self.currentCmd):].strip()
return self
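# Round-trip sketch (hypothetical options): parse() stores everything after
# the command word verbatim, so the input line
#   auth --enableshadow --passalgo=sha512
# is written back out by __str__() as
#   # System authorization information
#   auth --enableshadow --passalgo=sha512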
########NEW FILE########
__FILENAME__ = autopart
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_AutoPart(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=100, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.autopart = kwargs.get("autopart", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.autopart:
retval += "autopart\n"
return retval
def parse(self, args):
if len(args) > 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "autopart")
self.autopart = True
return self
class F9_AutoPart(FC3_AutoPart):
removedKeywords = FC3_AutoPart.removedKeywords
removedAttrs = FC3_AutoPart.removedAttrs
def __init__(self, writePriority=100, *args, **kwargs):
FC3_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)
self.encrypted = kwargs.get("encrypted", False)
self.passphrase = kwargs.get("passphrase", "")
self.op = self._getParser()
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.autopart:
retval += "autopart"
if self.encrypted:
retval += " --encrypted"
if self.passphrase != "":
retval += " --passphrase=\"%s\""% self.passphrase
if retval != "":
retval += "\n"
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--encrypted", action="store_true", default=False)
op.add_option("--passphrase")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
self.autopart = True
return self
class F12_AutoPart(F9_AutoPart):
removedKeywords = F9_AutoPart.removedKeywords
removedAttrs = F9_AutoPart.removedAttrs
def __init__(self, writePriority=100, *args, **kwargs):
F9_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)
self.escrowcert = kwargs.get("escrowcert", "")
self.backuppassphrase = kwargs.get("backuppassphrase", False)
def __str__(self):
retval = F9_AutoPart.__str__(self)
if self.encrypted and self.escrowcert != "":
retval = retval.strip()
retval += " --escrowcert=\"%s\"" % self.escrowcert
if self.backuppassphrase:
retval += " --backuppassphrase"
retval += "\n"
return retval
def _getParser(self):
op = F9_AutoPart._getParser(self)
op.add_option("--escrowcert")
op.add_option("--backuppassphrase", action="store_true", default=False)
return op
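# Input lines accepted by the successive autopart classes above
# (option values are hypothetical):
#   autopart                                        # FC3_AutoPart
#   autopart --encrypted --passphrase="secret"      # F9_AutoPart adds encryption
#   autopart --encrypted --escrowcert="http://host/cert" --backuppassphrase   # F12_AutoPart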
########NEW FILE########
__FILENAME__ = autostep
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
class FC3_AutoStep(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.autostep = kwargs.get("autostep", False)
self.autoscreenshot = kwargs.get("autoscreenshot", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.autostep:
if self.autoscreenshot:
retval += "autostep --autoscreenshot\n"
else:
retval += "autostep\n"
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--autoscreenshot", dest="autoscreenshot",
action="store_true", default=False)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
self.autostep = True
return self
########NEW FILE########
__FILENAME__ = bootloader
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
class FC3_Bootloader(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=10, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.driveorder = kwargs.get("driveorder", [])
self.appendLine = kwargs.get("appendLine", "")
self.forceLBA = kwargs.get("forceLBA", False)
self.linear = kwargs.get("linear", True)
self.location = kwargs.get("location", "")
self.md5pass = kwargs.get("md5pass", "")
self.password = kwargs.get("password", "")
self.upgrade = kwargs.get("upgrade", False)
self.useLilo = kwargs.get("useLilo", False)
self.deleteRemovedAttrs()
def _getArgsAsStr(self):
retval = ""
if self.appendLine != "":
retval += " --append=\"%s\"" % self.appendLine
if self.linear:
retval += " --linear"
if self.location:
retval += " --location=%s" % self.location
if hasattr(self, "forceLBA") and self.forceLBA:
retval += " --lba32"
if self.password != "":
retval += " --password=\"%s\"" % self.password
if self.md5pass != "":
retval += " --md5pass=\"%s\"" % self.md5pass
if self.upgrade:
retval += " --upgrade"
if self.useLilo:
retval += " --useLilo"
if len(self.driveorder) > 0:
retval += " --driveorder=\"%s\"" % ",".join(self.driveorder)
return retval
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.location != "":
retval += "# System bootloader configuration\nbootloader"
retval += self._getArgsAsStr() + "\n"
return retval
def _getParser(self):
def driveorder_cb (option, opt_str, value, parser):
for d in value.split(','):
parser.values.ensure_value(option.dest, []).append(d)
op = KSOptionParser()
op.add_option("--append", dest="appendLine")
op.add_option("--linear", dest="linear", action="store_true",
default=True)
op.add_option("--nolinear", dest="linear", action="store_false")
op.add_option("--location", dest="location", type="choice",
default="mbr",
choices=["mbr", "partition", "none", "boot"])
op.add_option("--lba32", dest="forceLBA", action="store_true",
default=False)
op.add_option("--password", dest="password", default="")
op.add_option("--md5pass", dest="md5pass", default="")
op.add_option("--upgrade", dest="upgrade", action="store_true",
default=False)
op.add_option("--useLilo", dest="useLilo", action="store_true",
default=False)
op.add_option("--driveorder", dest="driveorder", action="callback",
callback=driveorder_cb, nargs=1, type="string")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
if self.currentCmd == "lilo":
self.useLilo = True
return self
class FC4_Bootloader(FC3_Bootloader):
removedKeywords = FC3_Bootloader.removedKeywords + ["linear", "useLilo"]
removedAttrs = FC3_Bootloader.removedAttrs + ["linear", "useLilo"]
def __init__(self, writePriority=10, *args, **kwargs):
FC3_Bootloader.__init__(self, writePriority, *args, **kwargs)
def _getArgsAsStr(self):
retval = ""
if self.appendLine != "":
retval += " --append=\"%s\"" % self.appendLine
if self.location:
retval += " --location=%s" % self.location
if hasattr(self, "forceLBA") and self.forceLBA:
retval += " --lba32"
if self.password != "":
retval += " --password=\"%s\"" % self.password
if self.md5pass != "":
retval += " --md5pass=\"%s\"" % self.md5pass
if self.upgrade:
retval += " --upgrade"
if len(self.driveorder) > 0:
retval += " --driveorder=\"%s\"" % ",".join(self.driveorder)
return retval
def _getParser(self):
op = FC3_Bootloader._getParser(self)
op.remove_option("--linear")
op.remove_option("--nolinear")
op.remove_option("--useLilo")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
return self
class F8_Bootloader(FC4_Bootloader):
removedKeywords = FC4_Bootloader.removedKeywords
removedAttrs = FC4_Bootloader.removedAttrs
def __init__(self, writePriority=10, *args, **kwargs):
FC4_Bootloader.__init__(self, writePriority, *args, **kwargs)
self.timeout = kwargs.get("timeout", None)
self.default = kwargs.get("default", "")
def _getArgsAsStr(self):
ret = FC4_Bootloader._getArgsAsStr(self)
if self.timeout is not None:
ret += " --timeout=%d" %(self.timeout,)
if self.default:
ret += " --default=%s" %(self.default,)
return ret
def _getParser(self):
op = FC4_Bootloader._getParser(self)
op.add_option("--timeout", dest="timeout", type="int")
op.add_option("--default", dest="default")
return op
class F12_Bootloader(F8_Bootloader):
removedKeywords = F8_Bootloader.removedKeywords
removedAttrs = F8_Bootloader.removedAttrs
def _getParser(self):
op = F8_Bootloader._getParser(self)
op.add_option("--lba32", dest="forceLBA", deprecated=1, action="store_true")
return op
class F14_Bootloader(F12_Bootloader):
removedKeywords = F12_Bootloader.removedKeywords + ["forceLBA"]
    removedAttrs = F12_Bootloader.removedAttrs + ["forceLBA"]
def _getParser(self):
op = F12_Bootloader._getParser(self)
op.remove_option("--lba32")
return op
class F15_Bootloader(F14_Bootloader):
removedKeywords = F14_Bootloader.removedKeywords
removedAttrs = F14_Bootloader.removedAttrs
def __init__(self, writePriority=10, *args, **kwargs):
F14_Bootloader.__init__(self, writePriority, *args, **kwargs)
self.isCrypted = kwargs.get("isCrypted", False)
def _getArgsAsStr(self):
ret = F14_Bootloader._getArgsAsStr(self)
if self.isCrypted:
ret += " --iscrypted"
return ret
def _getParser(self):
def password_cb(option, opt_str, value, parser):
parser.values.isCrypted = True
parser.values.password = value
op = F14_Bootloader._getParser(self)
op.add_option("--iscrypted", dest="isCrypted", action="store_true", default=False)
op.add_option("--md5pass", action="callback", callback=password_cb, nargs=1, type="string")
return op
class RHEL5_Bootloader(FC4_Bootloader):
removedKeywords = FC4_Bootloader.removedKeywords
removedAttrs = FC4_Bootloader.removedAttrs
def __init__(self, writePriority=10, *args, **kwargs):
FC4_Bootloader.__init__(self, writePriority, *args, **kwargs)
self.hvArgs = kwargs.get("hvArgs", "")
def _getArgsAsStr(self):
ret = FC4_Bootloader._getArgsAsStr(self)
if self.hvArgs:
ret += " --hvargs=\"%s\"" %(self.hvArgs,)
return ret
def _getParser(self):
op = FC4_Bootloader._getParser(self)
op.add_option("--hvargs", dest="hvArgs", type="string")
return op
class RHEL6_Bootloader(F12_Bootloader):
removedKeywords = F12_Bootloader.removedKeywords
removedAttrs = F12_Bootloader.removedAttrs
def __init__(self, writePriority=10, *args, **kwargs):
F12_Bootloader.__init__(self, writePriority, *args, **kwargs)
self.isCrypted = kwargs.get("isCrypted", False)
def _getArgsAsStr(self):
ret = F12_Bootloader._getArgsAsStr(self)
if self.isCrypted:
ret += " --iscrypted"
return ret
def _getParser(self):
def password_cb(option, opt_str, value, parser):
parser.values.isCrypted = True
parser.values.password = value
op = F12_Bootloader._getParser(self)
op.add_option("--iscrypted", dest="isCrypted", action="store_true", default=False)
op.add_option("--md5pass", action="callback", callback=password_cb, nargs=1, type="string")
return op
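# Illustrative usage sketch (not part of the original pykickstart source):
# parsing a typical F8-era bootloader line.  lineno is normally set by the
# parser machinery.
if __name__ == "__main__":
    bl = F8_Bootloader()
    bl.lineno = 1
    bl.parse(["--location=mbr", "--timeout=5", "--driveorder=sda,sdb"])
    print str(bl)    # prints the canonical "bootloader ..." line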
########NEW FILE########
__FILENAME__ = clearpart
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.errors import *
from pykickstart.options import *
class FC3_ClearPart(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=120, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.drives = kwargs.get("drives", [])
self.initAll = kwargs.get("initAll", False)
self.type = kwargs.get("type", None)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.type is None:
return retval
if self.type == CLEARPART_TYPE_NONE:
clearstr = "--none"
elif self.type == CLEARPART_TYPE_LINUX:
clearstr = "--linux"
elif self.type == CLEARPART_TYPE_ALL:
clearstr = "--all"
else:
clearstr = ""
if self.initAll:
initstr = "--initlabel"
else:
initstr = ""
if len(self.drives) > 0:
drivestr = "--drives=" + ",".join(self.drives)
else:
drivestr = ""
retval += "# Partition clearing information\nclearpart %s %s %s\n" % (clearstr, initstr, drivestr)
return retval
def _getParser(self):
def drive_cb (option, opt_str, value, parser):
for d in value.split(','):
parser.values.ensure_value(option.dest, []).append(d)
op = KSOptionParser()
op.add_option("--all", dest="type", action="store_const",
const=CLEARPART_TYPE_ALL)
op.add_option("--drives", dest="drives", action="callback",
callback=drive_cb, nargs=1, type="string")
op.add_option("--initlabel", dest="initAll", action="store_true",
default=False)
op.add_option("--linux", dest="type", action="store_const",
const=CLEARPART_TYPE_LINUX)
op.add_option("--none", dest="type", action="store_const",
const=CLEARPART_TYPE_NONE)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
return self
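# Illustrative usage sketch (not part of the original pykickstart source):
# a clearpart line restricted to two drives.
if __name__ == "__main__":
    cp = FC3_ClearPart()
    cp.lineno = 1
    cp.parse(["--linux", "--initlabel", "--drives=sda,sdb"])
    print str(cp)    # -> "clearpart --linux --initlabel --drives=sda,sdb"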
########NEW FILE########
__FILENAME__ = device
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class F8_DeviceData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.moduleName = kwargs.get("moduleName", "")
self.moduleOpts = kwargs.get("moduleOpts", "")
def __eq__(self, y):
return self.moduleName == y.moduleName
def __str__(self):
retval = BaseData.__str__(self)
if self.moduleName != "":
retval += "device %s" % self.moduleName
if self.moduleOpts != "":
retval += " --opts=\"%s\"" % self.moduleOpts
return retval + "\n"
class FC3_Device(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.type = kwargs.get("type", "")
self.moduleName = kwargs.get("moduleName", "")
self.moduleOpts = kwargs.get("moduleOpts", "")
def __eq__(self, y):
return self.moduleName == y.moduleName
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.moduleName != "":
retval += "device %s %s" % (self.type, self.moduleName)
if self.moduleOpts != "":
retval += " --opts=\"%s\"" % self.moduleOpts
return retval + "\n"
def _getParser(self):
op = KSOptionParser()
op.add_option("--opts", dest="moduleOpts", default="")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 2:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("device command requires two arguments: module type and name"))
self.moduleOpts = opts.moduleOpts
self.type = extra[0]
self.moduleName = extra[1]
return self
class F8_Device(FC3_Device):
removedKeywords = FC3_Device.removedKeywords
removedAttrs = FC3_Device.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_Device.__init__(self, writePriority, *args, **kwargs)
self.deviceList = kwargs.get("deviceList", [])
def __str__(self):
retval = ""
for device in self.deviceList:
retval += device.__str__()
return retval
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("%s command requires a single argument: %s") % ("device", "module name"))
dd = F8_DeviceData()
self._setToObj(self.op, opts, dd)
dd.lineno = self.lineno
dd.moduleName = extra[0]
# Check for duplicates in the data list.
if dd in self.dataList():
warnings.warn(_("A module with the name %s has already been defined.") % dd.moduleName)
return dd
def dataList(self):
return self.deviceList
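# Illustrative usage sketch (not part of the original pykickstart source):
# F8_Device.parse returns a data object; the parser machinery normally
# appends it to deviceList itself, so that step is done by hand here.
if __name__ == "__main__":
    dev = F8_Device()
    dev.lineno = 1
    dev.deviceList.append(dev.parse(["e1000", "--opts=io=0x300"]))
    print str(dev)    # -> device e1000 --opts="io=0x300"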
########NEW FILE########
__FILENAME__ = deviceprobe
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
class FC3_DeviceProbe(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.deviceprobe = kwargs.get("deviceprobe", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.deviceprobe != "":
retval += "deviceprobe %s\n" % self.deviceprobe
return retval
def parse(self, args):
self.deviceprobe = " ".join(args)
return self
########NEW FILE########
__FILENAME__ = displaymode
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_DisplayMode(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.displayMode = kwargs.get("displayMode", None)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.displayMode is None:
return retval
if self.displayMode == DISPLAY_MODE_CMDLINE:
retval += "cmdline\n"
elif self.displayMode == DISPLAY_MODE_GRAPHICAL:
retval += "# Use graphical install\ngraphical\n"
elif self.displayMode == DISPLAY_MODE_TEXT:
retval += "# Use text mode install\ntext\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
raise KickstartParseError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % self.currentCmd)
if self.currentCmd == "cmdline":
self.displayMode = DISPLAY_MODE_CMDLINE
elif self.currentCmd == "graphical":
self.displayMode = DISPLAY_MODE_GRAPHICAL
elif self.currentCmd == "text":
self.displayMode = DISPLAY_MODE_TEXT
return self
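# Illustrative usage sketch (not part of the original pykickstart source):
# the display mode is inferred from the command name, which the parser
# stores in currentCmd before calling parse().
if __name__ == "__main__":
    mode = FC3_DisplayMode()
    mode.lineno = 1
    mode.currentCmd = "graphical"
    mode.parse([])
    print str(mode)    # -> "# Use graphical install" / "graphical"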
########NEW FILE########
__FILENAME__ = dmraid
#
# Chris Lumens <clumens@redhat.com>
# Peter Jones <pjones@redhat.com>
#
# Copyright 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC6_DmRaidData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.devices = kwargs.get("devices", [])
self.dmset = kwargs.get("dmset", None)
def __eq__(self, y):
return self.name == y.name and self.devices == y.devices
def __str__(self):
retval = BaseData.__str__(self)
retval += "dmraid --name=%s" % self.name
for dev in self.devices:
retval += " --dev=\"%s\"" % dev
return retval + "\n"
class FC6_DmRaid(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=60, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.dmraids = kwargs.get("dmraids", [])
def __str__(self):
retval = ""
for dm in self.dmraids:
retval += dm.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--name", dest="name", action="store", type="string",
required=1)
op.add_option("--dev", dest="devices", action="append", type="string",
required=1)
return op
    def parse(self, args):
        dm = FC6_DmRaidData()
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToObj(self.op, opts, dm)
        dm.lineno = self.lineno
        # Strip any path prefix from the parsed name (e.g. "isw_raid/dm0"
        # becomes "dm0").  This must happen after _setToObj; splitting
        # before it would operate on the empty default and then be
        # overwritten by the parsed value.
        dm.name = dm.name.split('/')[-1]
# Check for duplicates in the data list.
if dm in self.dataList():
warnings.warn(_("A DM RAID device with the name %s and devices %s has already been defined.") % (dm.name, dm.devices))
return dm
def dataList(self):
return self.dmraids
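# Illustrative usage sketch (not part of the original pykickstart source):
# any path prefix on the --name value is stripped, so "isw_raid/dm0" ends
# up as "dm0" on the data object.
if __name__ == "__main__":
    raid = FC6_DmRaid()
    raid.lineno = 1
    raid.dmraids.append(raid.parse(["--name=isw_raid/dm0", "--dev=sda", "--dev=sdb"]))
    print str(raid)    # -> dmraid --name=dm0 --dev="sda" --dev="sdb"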
########NEW FILE########
__FILENAME__ = driverdisk
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_DriverDiskData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.partition = kwargs.get("partition", "")
self.source = kwargs.get("source", "")
self.type = kwargs.get("type", "")
def _getArgsAsStr(self):
retval = ""
if self.partition:
retval += "%s" % self.partition
if hasattr(self, "type") and self.type:
retval += " --type=%s" % self.type
elif self.source:
retval += "--source=%s" % self.source
return retval
def __str__(self):
retval = BaseData.__str__(self)
retval += "driverdisk %s\n" % self._getArgsAsStr()
return retval
class FC4_DriverDiskData(FC3_DriverDiskData):
removedKeywords = FC3_DriverDiskData.removedKeywords
removedAttrs = FC3_DriverDiskData.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_DriverDiskData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.biospart = kwargs.get("biospart", "")
def _getArgsAsStr(self):
retval = ""
if self.partition:
retval += "%s" % self.partition
if hasattr(self, "type") and self.type:
retval += " --type=%s" % self.type
elif self.source:
retval += "--source=%s" % self.source
elif self.biospart:
retval += "--biospart=%s" % self.biospart
return retval
class F12_DriverDiskData(FC4_DriverDiskData):
removedKeywords = FC4_DriverDiskData.removedKeywords + ["type"]
removedAttrs = FC4_DriverDiskData.removedAttrs + ["type"]
def __init__(self, *args, **kwargs):
FC4_DriverDiskData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
F14_DriverDiskData = F12_DriverDiskData
class FC3_DriverDisk(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.driverdiskList = kwargs.get("driverdiskList", [])
def __str__(self):
retval = ""
for dd in self.driverdiskList:
retval += dd.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--source")
op.add_option("--type")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one partition may be specified for driverdisk command."))
if len(extra) == 1 and opts.source:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one of --source and partition may be specified for driverdisk command."))
if not extra and not opts.source:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("One of --source or partition must be specified for driverdisk command."))
ddd = self.handler.DriverDiskData()
self._setToObj(self.op, opts, ddd)
ddd.lineno = self.lineno
if len(extra) == 1:
ddd.partition = extra[0]
return ddd
def dataList(self):
return self.driverdiskList
class FC4_DriverDisk(FC3_DriverDisk):
removedKeywords = FC3_DriverDisk.removedKeywords
    removedAttrs = FC3_DriverDisk.removedAttrs
def _getParser(self):
op = FC3_DriverDisk._getParser(self)
op.add_option("--biospart")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one partition may be specified for driverdisk command."))
if len(extra) == 1 and opts.source:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one of --source and partition may be specified for driverdisk command."))
elif len(extra) == 1 and opts.biospart:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one of --biospart and partition may be specified for driverdisk command."))
elif opts.source and opts.biospart:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one of --biospart and --source may be specified for driverdisk command."))
if not extra and not opts.source and not opts.biospart:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("One of --source, --biospart, or partition must be specified for driverdisk command."))
ddd = self.handler.DriverDiskData()
self._setToObj(self.op, opts, ddd)
ddd.lineno = self.lineno
if len(extra) == 1:
ddd.partition = extra[0]
return ddd
class F12_DriverDisk(FC4_DriverDisk):
removedKeywords = FC4_DriverDisk.removedKeywords
    removedAttrs = FC4_DriverDisk.removedAttrs
def _getParser(self):
op = FC4_DriverDisk._getParser(self)
op.add_option("--type", deprecated=1)
return op
class F14_DriverDisk(F12_DriverDisk):
removedKeywords = F12_DriverDisk.removedKeywords
    removedAttrs = F12_DriverDisk.removedAttrs
def _getParser(self):
op = F12_DriverDisk._getParser(self)
op.remove_option("--type")
return op
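# Illustrative usage sketch (not part of the original pykickstart source):
# parse() asks the handler for its DriverDiskData class, so a minimal
# stand-in handler (hypothetical, for demonstration only) is supplied.
if __name__ == "__main__":
    class _StubHandler(object):
        DriverDiskData = FC3_DriverDiskData
    dd = FC3_DriverDisk()
    dd.handler = _StubHandler()
    dd.lineno = 1
    dd.driverdiskList.append(dd.parse(["sda3"]))
    print str(dd)    # -> driverdisk sda3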
########NEW FILE########
__FILENAME__ = fcoe
#
# Hans de Goede <hdegoede@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class F12_FcoeData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.nic = kwargs.get("nic", None)
def __eq__(self, y):
return self.nic == y.nic
def _getArgsAsStr(self):
retval = ""
if self.nic:
retval += " --nic=%s" % self.nic
return retval
def __str__(self):
retval = BaseData.__str__(self)
retval += "fcoe%s\n" % self._getArgsAsStr()
return retval
class F13_FcoeData(F12_FcoeData):
removedKeywords = F12_FcoeData.removedKeywords
removedAttrs = F12_FcoeData.removedAttrs
def __init__(self, *args, **kwargs):
F12_FcoeData.__init__(self, *args, **kwargs)
self.dcb = kwargs.get("dcb", False)
def _getArgsAsStr(self):
retval = F12_FcoeData._getArgsAsStr(self)
if self.dcb:
retval += " --dcb"
return retval
class F12_Fcoe(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=71, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.fcoe = kwargs.get("fcoe", [])
def __str__(self):
retval = ""
for fcoe in self.fcoe:
retval += fcoe.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--nic", dest="nic", required=1)
return op
def parse(self, args):
zd = self.handler.FcoeData()
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
mapping = {"command": "fcoe", "options": extra}
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping)
self._setToObj(self.op, opts, zd)
zd.lineno = self.lineno
# Check for duplicates in the data list.
if zd in self.dataList():
warnings.warn(_("A FCOE device with the name %s has already been defined.") % zd.nic)
return zd
def dataList(self):
return self.fcoe
class F13_Fcoe(F12_Fcoe):
removedKeywords = F12_Fcoe.removedKeywords
removedAttrs = F12_Fcoe.removedAttrs
def _getParser(self):
op = F12_Fcoe._getParser(self)
op.add_option("--dcb", dest="dcb", action="store_true", default=False)
return op
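# Illustrative usage sketch (not part of the original pykickstart source):
# parse() builds its data object through the handler, so a minimal stand-in
# handler (hypothetical, for demonstration only) is used.
if __name__ == "__main__":
    class _StubHandler(object):
        FcoeData = F13_FcoeData
    fc = F13_Fcoe()
    fc.handler = _StubHandler()
    fc.lineno = 1
    fc.fcoe.append(fc.parse(["--nic=eth0", "--dcb"]))
    print str(fc)    # -> fcoe --nic=eth0 --dcb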
########NEW FILE########
__FILENAME__ = firewall
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_Firewall(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.enabled = kwargs.get("enabled", None)
self.ports = kwargs.get("ports", [])
self.trusts = kwargs.get("trusts", [])
def __str__(self):
extra = []
filteredPorts = []
retval = KickstartCommand.__str__(self)
if self.enabled is None:
return retval
if self.enabled:
# It's possible we have words in the ports list instead of
# port:proto (s-c-kickstart may do this). So, filter those
# out into their own list leaving what we expect.
for port in self.ports:
if port == "ssh":
extra.append(" --ssh")
elif port == "telnet":
extra.append(" --telnet")
elif port == "smtp":
extra.append(" --smtp")
elif port == "http":
extra.append(" --http")
elif port == "ftp":
extra.append(" --ftp")
else:
filteredPorts.append(port)
# All the port:proto strings go into a comma-separated list.
portstr = ",".join(filteredPorts)
if len(portstr) > 0:
portstr = " --port=" + portstr
else:
portstr = ""
extrastr = "".join(extra)
truststr = ",".join(self.trusts)
if len(truststr) > 0:
truststr = " --trust=" + truststr
# The output port list consists only of port:proto for
# everything that we don't recognize, and special options for
# those that we do.
retval += "# Firewall configuration\nfirewall --enabled%s%s%s\n" % (extrastr, portstr, truststr)
else:
retval += "# Firewall configuration\nfirewall --disabled\n"
return retval
def _getParser(self):
def firewall_port_cb (option, opt_str, value, parser):
for p in value.split(","):
p = p.strip()
if p.find(":") == -1:
p = "%s:tcp" % p
parser.values.ensure_value(option.dest, []).append(p)
op = KSOptionParser(mapping={"ssh":["22:tcp"], "telnet":["23:tcp"],
"smtp":["25:tcp"], "http":["80:tcp", "443:tcp"],
"ftp":["21:tcp"]})
op.add_option("--disable", "--disabled", dest="enabled",
action="store_false")
op.add_option("--enable", "--enabled", dest="enabled",
action="store_true", default=True)
op.add_option("--ftp", "--http", "--smtp", "--ssh", "--telnet",
dest="ports", action="map_extend")
op.add_option("--high", deprecated=1)
op.add_option("--medium", deprecated=1)
op.add_option("--port", dest="ports", action="callback",
callback=firewall_port_cb, nargs=1, type="string")
op.add_option("--trust", dest="trusts", action="append")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 0:
mapping = {"command": "firewall", "options": extra}
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping)
self._setToSelf(self.op, opts)
return self
class F9_Firewall(FC3_Firewall):
removedKeywords = FC3_Firewall.removedKeywords
removedAttrs = FC3_Firewall.removedAttrs
def _getParser(self):
op = FC3_Firewall._getParser(self)
op.remove_option("--high")
op.remove_option("--medium")
return op
class F10_Firewall(F9_Firewall):
removedKeywords = F9_Firewall.removedKeywords
removedAttrs = F9_Firewall.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F9_Firewall.__init__(self, writePriority, *args, **kwargs)
self.services = kwargs.get("services", [])
def __str__(self):
if self.enabled is None:
return ""
retval = F9_Firewall.__str__(self)
if self.enabled:
retval = retval.strip()
svcstr = ",".join(self.services)
if len(svcstr) > 0:
svcstr = " --service=" + svcstr
else:
svcstr = ""
return retval + "%s\n" % svcstr
else:
return retval
def _getParser(self):
def service_cb (option, opt_str, value, parser):
            # python2.4 does not support the action="append_const" we were
            # using for these options.  Instead, we have to fake it by
            # appending whatever the option string is to the service list.
if not value:
parser.values.ensure_value(option.dest, []).append(opt_str[2:])
return
for p in value.split(","):
p = p.strip()
parser.values.ensure_value(option.dest, []).append(p)
op = F9_Firewall._getParser(self)
op.add_option("--service", dest="services", action="callback",
callback=service_cb, nargs=1, type="string")
op.add_option("--ftp", dest="services", action="callback",
callback=service_cb)
op.add_option("--http", dest="services", action="callback",
callback=service_cb)
op.add_option("--smtp", dest="services", action="callback",
callback=service_cb)
op.add_option("--ssh", dest="services", action="callback",
callback=service_cb)
op.add_option("--telnet", deprecated=1)
return op
class F14_Firewall(F10_Firewall):
removedKeywords = F10_Firewall.removedKeywords + ["telnet"]
removedAttrs = F10_Firewall.removedAttrs + ["telnet"]
def _getParser(self):
op = F10_Firewall._getParser(self)
op.remove_option("--telnet")
return op
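# Illustrative usage sketch (not part of the original pykickstart source):
# --ssh expands through the option mapping to "22:tcp", and a bare "8080"
# is normalized to "8080:tcp" by the port callback.
if __name__ == "__main__":
    fw = FC3_Firewall()
    fw.lineno = 1
    fw.parse(["--enabled", "--ssh", "--port=2222:tcp,8080", "--trust=eth0"])
    # expected roughly: firewall --enabled --port=22:tcp,2222:tcp,8080:tcp --trust=eth0
    print str(fw)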
########NEW FILE########
__FILENAME__ = firstboot
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.options import *
class FC3_Firstboot(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.firstboot = kwargs.get("firstboot", None)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.firstboot is None:
return retval
if self.firstboot == FIRSTBOOT_SKIP:
retval += "firstboot --disable\n"
elif self.firstboot == FIRSTBOOT_DEFAULT:
retval += "# Run the Setup Agent on first boot\nfirstboot --enable\n"
elif self.firstboot == FIRSTBOOT_RECONFIG:
retval += "# Run the Setup Agent on first boot\nfirstboot --reconfig\n"
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--disable", "--disabled", dest="firstboot",
action="store_const", const=FIRSTBOOT_SKIP)
op.add_option("--enable", "--enabled", dest="firstboot",
action="store_const", const=FIRSTBOOT_DEFAULT)
op.add_option("--reconfig", dest="firstboot", action="store_const",
const=FIRSTBOOT_RECONFIG)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self.firstboot = opts.firstboot
return self
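# Illustrative usage sketch (not part of the original pykickstart source).
if __name__ == "__main__":
    fb = FC3_Firstboot()
    fb.lineno = 1
    fb.parse(["--reconfig"])
    print str(fb)    # -> firstboot --reconfig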
########NEW FILE########
__FILENAME__ = group
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class F12_GroupData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.gid = kwargs.get("gid", None)
def __eq__(self, y):
return self.name == y.name
def __str__(self):
retval = BaseData.__str__(self)
retval += "group"
if self.name:
retval += " --name=%s" % self.name
if self.gid:
retval += " --gid=%s" % self.gid
return retval + "\n"
class F12_Group(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.groupList = kwargs.get("groupList", [])
def __str__(self):
retval = ""
for user in self.groupList:
retval += user.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--name", required=1)
op.add_option("--gid", type="int")
return op
def parse(self, args):
gd = self.handler.GroupData()
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToObj(self.op, opts, gd)
gd.lineno = self.lineno
# Check for duplicates in the data list.
if gd in self.dataList():
warnings.warn(_("A group with the name %s has already been defined.") % gd.name)
return gd
def dataList(self):
return self.groupList
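# Illustrative usage sketch (not part of the original pykickstart source):
# like the other list-valued commands, parse() returns a data object that
# the parser machinery would normally append to groupList.  The stand-in
# handler below is hypothetical, for demonstration only.
if __name__ == "__main__":
    class _StubHandler(object):
        GroupData = F12_GroupData
    grp = F12_Group()
    grp.handler = _StubHandler()
    grp.lineno = 1
    grp.groupList.append(grp.parse(["--name=mockbuild", "--gid=9001"]))
    print str(grp)    # -> group --name=mockbuild --gid=9001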
########NEW FILE########
__FILENAME__ = ignoredisk
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_IgnoreDisk(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.ignoredisk = kwargs.get("ignoredisk", [])
def __str__(self):
retval = KickstartCommand.__str__(self)
if len(self.ignoredisk) > 0:
retval += "ignoredisk --drives=%s\n" % ",".join(self.ignoredisk)
return retval
def _getParser(self):
def drive_cb (option, opt_str, value, parser):
for d in value.split(','):
parser.values.ensure_value(option.dest, []).append(d)
op = KSOptionParser()
op.add_option("--drives", dest="ignoredisk", action="callback",
callback=drive_cb, nargs=1, type="string", required=1)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
return self
class F8_IgnoreDisk(FC3_IgnoreDisk):
removedKeywords = FC3_IgnoreDisk.removedKeywords
removedAttrs = FC3_IgnoreDisk.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_IgnoreDisk.__init__(self, writePriority, *args, **kwargs)
self.onlyuse = kwargs.get("onlyuse", [])
def __str__(self):
retval = KickstartCommand.__str__(self)
if len(self.ignoredisk) > 0:
retval += "ignoredisk --drives=%s\n" % ",".join(self.ignoredisk)
elif len(self.onlyuse) > 0:
retval += "ignoredisk --only-use=%s\n" % ",".join(self.onlyuse)
return retval
def parse(self, args, errorCheck=True):
retval = FC3_IgnoreDisk.parse(self, args)
if errorCheck:
            if (len(self.ignoredisk) == 0 and len(self.onlyuse) == 0) or (len(self.ignoredisk) > 0 and len(self.onlyuse) > 0):
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("One of --drives or --only-use must be specified for ignoredisk command."))
return retval
def _getParser(self):
def drive_cb (option, opt_str, value, parser):
for d in value.split(','):
parser.values.ensure_value(option.dest, []).append(d)
op = FC3_IgnoreDisk._getParser(self)
op.add_option("--drives", dest="ignoredisk", action="callback",
callback=drive_cb, nargs=1, type="string")
op.add_option("--only-use", dest="onlyuse", action="callback",
callback=drive_cb, nargs=1, type="string")
return op
class RHEL6_IgnoreDisk(F8_IgnoreDisk):
removedKeywords = F8_IgnoreDisk.removedKeywords
removedAttrs = F8_IgnoreDisk.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F8_IgnoreDisk.__init__(self, writePriority, *args, **kwargs)
self.interactive = kwargs.get("interactive", False)
if self.interactive:
self.ignoredisk = []
def __str__(self):
retval = F8_IgnoreDisk.__str__(self)
if self.interactive:
retval = "ignoredisk --interactive\n"
return retval
def parse(self, args):
retval = F8_IgnoreDisk.parse(self, args, errorCheck=False)
howmany = 0
if len(self.ignoredisk) > 0:
howmany += 1
if len(self.onlyuse) > 0:
howmany += 1
if self.interactive:
howmany += 1
if howmany != 1:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("One of --drives, --only-use, or --interactive must be specified for ignoredisk command."))
return retval
def _getParser(self):
op = F8_IgnoreDisk._getParser(self)
op.add_option("--interactive", dest="interactive", action="store_true",
default=False)
return op
F14_IgnoreDisk = RHEL6_IgnoreDisk
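# Illustrative usage sketch (not part of the original pykickstart source):
# exactly one of the mutually exclusive option groups must be given.
if __name__ == "__main__":
    ign = F8_IgnoreDisk()
    ign.lineno = 1
    ign.parse(["--only-use=sda"])
    print str(ign)    # -> ignoredisk --only-use=sda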
########NEW FILE########
__FILENAME__ = interactive
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_Interactive(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.interactive = kwargs.get("interactive", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.interactive:
retval += "# Use interactive kickstart installation method\ninteractive\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "interactive")
self.interactive = True
return self
class F14_Interactive(DeprecatedCommand):
def __init__(self):
DeprecatedCommand.__init__(self)
########NEW FILE########
__FILENAME__ = iscsi
#
# Chris Lumens <clumens@redhat.com>
# Peter Jones <pjones@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC6_IscsiData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.ipaddr = kwargs.get("ipaddr", "")
self.port = kwargs.get("port", "3260")
self.target = kwargs.get("target", "")
self.user = kwargs.get("user", None)
self.password = kwargs.get("password", None)
def _getArgsAsStr(self):
retval = ""
if self.target != "":
retval += " --target=%s" % self.target
if self.ipaddr != "":
retval += " --ipaddr=%s" % self.ipaddr
if self.port != "3260":
retval += " --port=%s" % self.port
if self.user is not None:
retval += " --user=%s" % self.user
if self.password is not None:
retval += " --password=%s" % self.password
return retval
def __str__(self):
retval = BaseData.__str__(self)
retval += "iscsi%s\n" % self._getArgsAsStr()
return retval
class F10_IscsiData(FC6_IscsiData):
removedKeywords = FC6_IscsiData.removedKeywords
removedAttrs = FC6_IscsiData.removedAttrs
def __init__(self, *args, **kwargs):
FC6_IscsiData.__init__(self, *args, **kwargs)
self.user_in = kwargs.get("user_in", None)
self.password_in = kwargs.get("password_in", None)
def _getArgsAsStr(self):
retval = FC6_IscsiData._getArgsAsStr(self)
if self.user_in is not None:
retval += " --reverse-user=%s" % self.user_in
if self.password_in is not None:
retval += " --reverse-password=%s" % self.password_in
return retval
class FC6_Iscsi(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=71, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.iscsi = kwargs.get("iscsi", [])
def __str__(self):
retval = ""
for iscsi in self.iscsi:
retval += iscsi.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--target", dest="target", action="store", type="string")
op.add_option("--ipaddr", dest="ipaddr", action="store", type="string",
required=1)
op.add_option("--port", dest="port", action="store", type="string")
op.add_option("--user", dest="user", action="store", type="string")
op.add_option("--password", dest="password", action="store",
type="string")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 0:
mapping = {"command": "iscsi", "options": extra}
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping)
dd = self.handler.IscsiData()
self._setToObj(self.op, opts, dd)
dd.lineno = self.lineno
return dd
def dataList(self):
return self.iscsi
class F10_Iscsi(FC6_Iscsi):
removedKeywords = FC6_Iscsi.removedKeywords
removedAttrs = FC6_Iscsi.removedAttrs
def _getParser(self):
op = FC6_Iscsi._getParser(self)
op.add_option("--reverse-user", dest="user_in", action="store",
type="string")
op.add_option("--reverse-password", dest="password_in", action="store",
type="string")
return op
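# Illustrative usage sketch (not part of the original pykickstart source),
# with a minimal stand-in handler (hypothetical, for demonstration only)
# supplying the data class.  The target IQN is an example value.
if __name__ == "__main__":
    class _StubHandler(object):
        IscsiData = F10_IscsiData
    isc = F10_Iscsi()
    isc.handler = _StubHandler()
    isc.lineno = 1
    isc.iscsi.append(isc.parse(["--ipaddr=10.0.0.1", "--target=iqn.2009-01.com.example:disk0"]))
    print str(isc)    # -> iscsi --target=iqn.2009-01.com.example:disk0 --ipaddr=10.0.0.1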
########NEW FILE########
__FILENAME__ = iscsiname
#
# Chris Lumens <clumens@redhat.com>
# Peter Jones <pjones@redhat.com>
#
# Copyright 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC6_IscsiName(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=70, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.iscsiname = kwargs.get("iscsiname", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.iscsiname != "":
retval += "iscsiname %s\n" % self.iscsiname
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "iscsiname")
self.iscsiname = extra[0]
return self
########NEW FILE########
__FILENAME__ = key
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class RHEL5_Key(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.key = kwargs.get("key", "")
self.skip = kwargs.get("skip", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.key == KS_INSTKEY_SKIP:
retval += "key --skip\n"
elif self.key != "":
retval += "key %s\n" % self.key
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--skip", action="store_true", default=False)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
if self.skip:
self.key = KS_INSTKEY_SKIP
elif len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "key")
else:
self.key = extra[0]
return self
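# Illustrative usage sketch (not part of the original pykickstart source).
if __name__ == "__main__":
    key = RHEL5_Key()
    key.lineno = 1
    key.parse(["--skip"])
    print str(key)    # -> key --skip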
########NEW FILE########
__FILENAME__ = keyboard
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_Keyboard(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.keyboard = kwargs.get("keyboard", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.keyboard != "":
retval += "# System keyboard\nkeyboard %s\n" % self.keyboard
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "keyboard")
self.keyboard = extra[0]
return self
########NEW FILE########
__FILENAME__ = lang
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_Lang(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.lang = kwargs.get("lang", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.lang != "":
retval += "# System language\nlang %s\n" % self.lang
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "lang")
self.lang = extra[0]
return self
def apply(self, instroot="/"):
if self.lang == "": return
f = open(instroot + "/etc/sysconfig/i18n", "w+")
f.write("LANG=\"%s\"\n" %(self.lang,))
f.close()
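# An illustrative sketch (not part of the upstream module) of how apply() is
# used after parsing: it writes the chosen locale to
# <instroot>/etc/sysconfig/i18n.  The install root below is hypothetical, so
# the call is left commented out.
if __name__ == "__main__":
    lang = FC3_Lang()
    lang.parse(["en_US.UTF-8"])
    # lang.apply(instroot="/mnt/sysimage")
    print lang.__str__()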
########NEW FILE########
__FILENAME__ = langsupport
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
class FC3_LangSupport(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.deflang = kwargs.get("deflang", "")
self.supported = kwargs.get("supported", [])
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.deflang:
retval += "langsupport --default=%s" % self.deflang
if self.supported:
retval += " %s" % " ".join(self.supported)
return retval + "\n"
def _getParser(self):
op = KSOptionParser()
op.add_option("--default", dest="deflang", default="en_US.UTF-8")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
self.supported = extra
return self
class FC5_LangSupport(DeprecatedCommand):
def __init__(self):
DeprecatedCommand.__init__(self)
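# An illustrative sketch (not part of the upstream module): the FC3 class
# still serializes, while FC5_LangSupport is a DeprecatedCommand stub whose
# parse() only emits a DeprecationWarning.  The locales below are made up.
if __name__ == "__main__":
    ls = FC3_LangSupport(deflang="en_US", supported=["de_DE", "fr_FR"])
    print ls.__str__()         # -> langsupport --default=en_US de_DE fr_FR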
########NEW FILE########
__FILENAME__ = lilocheck
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_LiloCheck(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.check = kwargs.get("check", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.check:
retval += "lilocheck\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "lilocheck")
self.check = True
return self
########NEW FILE########
__FILENAME__ = logging
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007, 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC6_Logging(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.host = kwargs.get("host", "")
self.level = kwargs.get("level", "info")
self.port = kwargs.get("port", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
retval += "# Installation logging level\nlogging --level=%s" % self.level
if self.host != "":
retval += " --host=%s" % self.host
if self.port != "":
retval += " --port=%s" % self.port
return retval + "\n"
def _getParser(self):
op = KSOptionParser()
op.add_option("--host")
op.add_option("--level", type="choice", default="info",
choices=["debug", "info", "warning", "error", "critical"])
op.add_option("--port")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if opts.port and not opts.host:
raise KickstartParseError, formatErrorMsg(self.lineno, msg=_("Can't specify --port without --host."))
self._setToSelf(self.op, opts)
return self
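# An illustrative sketch (not part of the upstream module): remote syslog
# settings parse as shown, and --port without --host trips the guard in
# parse() above.  The host address is made up.
if __name__ == "__main__":
    log = FC6_Logging()
    log.parse(["--level=debug", "--host=10.0.0.1", "--port=514"])
    print log.__str__()
    # FC6_Logging().parse(["--port=514"]) would raise a KickstartParseError.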
########NEW FILE########
__FILENAME__ = logvol
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_LogVolData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.fstype = kwargs.get("fstype", "")
self.grow = kwargs.get("grow", False)
self.maxSizeMB = kwargs.get("maxSizeMB", 0)
self.name = kwargs.get("name", "")
self.format = kwargs.get("format", True)
self.percent = kwargs.get("percent", 0)
self.recommended = kwargs.get("recommended", False)
self.size = kwargs.get("size", None)
self.preexist = kwargs.get("preexist", False)
self.vgname = kwargs.get("vgname", "")
self.mountpoint = kwargs.get("mountpoint", "")
def __eq__(self, y):
return self.vgname == y.vgname and self.name == y.name
def _getArgsAsStr(self):
retval = ""
if self.fstype != "":
retval += " --fstype=\"%s\"" % self.fstype
if self.grow:
retval += " --grow"
if self.maxSizeMB > 0:
retval += " --maxsize=%d" % self.maxSizeMB
if not self.format:
retval += " --noformat"
if self.percent > 0:
retval += " --percent=%d" % self.percent
if self.recommended:
retval += " --recommended"
if self.size > 0:
retval += " --size=%d" % self.size
if self.preexist:
retval += " --useexisting"
return retval
def __str__(self):
retval = BaseData.__str__(self)
retval += "logvol %s %s --name=%s --vgname=%s\n" % (self.mountpoint, self._getArgsAsStr(), self.name, self.vgname)
return retval
class FC4_LogVolData(FC3_LogVolData):
removedKeywords = FC3_LogVolData.removedKeywords
removedAttrs = FC3_LogVolData.removedAttrs
def __init__(self, *args, **kwargs):
FC3_LogVolData.__init__(self, *args, **kwargs)
self.bytesPerInode = kwargs.get("bytesPerInode", 4096)
self.fsopts = kwargs.get("fsopts", "")
def _getArgsAsStr(self):
retval = FC3_LogVolData._getArgsAsStr(self)
if hasattr(self, "bytesPerInode") and self.bytesPerInode != 0:
retval += " --bytes-per-inode=%d" % self.bytesPerInode
if self.fsopts != "":
retval += " --fsoptions=\"%s\"" % self.fsopts
return retval
class RHEL5_LogVolData(FC4_LogVolData):
removedKeywords = FC4_LogVolData.removedKeywords
removedAttrs = FC4_LogVolData.removedAttrs
def __init__(self, *args, **kwargs):
FC4_LogVolData.__init__(self, *args, **kwargs)
self.encrypted = kwargs.get("encrypted", False)
self.passphrase = kwargs.get("passphrase", "")
def _getArgsAsStr(self):
retval = FC4_LogVolData._getArgsAsStr(self)
if self.encrypted:
retval += " --encrypted"
if self.passphrase != "":
retval += " --passphrase=\"%s\"" % self.passphrase
return retval
class F9_LogVolData(FC4_LogVolData):
removedKeywords = FC4_LogVolData.removedKeywords + ["bytesPerInode"]
removedAttrs = FC4_LogVolData.removedAttrs + ["bytesPerInode"]
def __init__(self, *args, **kwargs):
FC4_LogVolData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.fsopts = kwargs.get("fsopts", "")
self.fsprofile = kwargs.get("fsprofile", "")
self.encrypted = kwargs.get("encrypted", False)
self.passphrase = kwargs.get("passphrase", "")
def _getArgsAsStr(self):
retval = FC4_LogVolData._getArgsAsStr(self)
if self.fsprofile != "":
retval += " --fsprofile=\"%s\"" % self.fsprofile
if self.encrypted:
retval += " --encrypted"
if self.passphrase != "":
retval += " --passphrase=\"%s\"" % self.passphrase
return retval
class F12_LogVolData(F9_LogVolData):
removedKeywords = F9_LogVolData.removedKeywords
removedAttrs = F9_LogVolData.removedAttrs
def __init__(self, *args, **kwargs):
F9_LogVolData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.escrowcert = kwargs.get("escrowcert", "")
self.backuppassphrase = kwargs.get("backuppassphrase", False)
def _getArgsAsStr(self):
retval = F9_LogVolData._getArgsAsStr(self)
if self.encrypted and self.escrowcert != "":
retval += " --escrowcert=\"%s\"" % self.escrowcert
if self.backuppassphrase:
retval += " --backuppassphrase"
return retval
F14_LogVolData = F12_LogVolData
class F15_LogVolData(F14_LogVolData):
removedKeywords = F14_LogVolData.removedKeywords
removedAttrs = F14_LogVolData.removedAttrs
def __init__(self, *args, **kwargs):
F14_LogVolData.__init__(self, *args, **kwargs)
self.label = kwargs.get("label", "")
def _getArgsAsStr(self):
retval = F14_LogVolData._getArgsAsStr(self)
if self.label != "":
retval += " --label=\"%s\"" % self.label
return retval
class FC3_LogVol(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=133, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.lvList = kwargs.get("lvList", [])
def __str__(self):
retval = ""
for part in self.lvList:
retval += part.__str__()
return retval
def _getParser(self):
def lv_cb (option, opt_str, value, parser):
parser.values.format = False
parser.values.preexist = True
op = KSOptionParser()
op.add_option("--fstype", dest="fstype")
op.add_option("--grow", dest="grow", action="store_true",
default=False)
op.add_option("--maxsize", dest="maxSizeMB", action="store", type="int",
nargs=1)
op.add_option("--name", dest="name", required=1)
op.add_option("--noformat", action="callback", callback=lv_cb,
dest="format", default=True, nargs=0)
op.add_option("--percent", dest="percent", action="store", type="int",
nargs=1)
op.add_option("--recommended", dest="recommended", action="store_true",
default=False)
op.add_option("--size", dest="size", action="store", type="int",
nargs=1)
op.add_option("--useexisting", dest="preexist", action="store_true",
default=False)
op.add_option("--vgname", dest="vgname", required=1)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) == 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Mount point required for %s") % "logvol")
lvd = self.handler.LogVolData()
self._setToObj(self.op, opts, lvd)
lvd.lineno = self.lineno
        lvd.mountpoint = extra[0]
# Check for duplicates in the data list.
if lvd in self.dataList():
warnings.warn(_("A logical volume with the name %s has already been defined in volume group %s.") % (lvd.device, lvd.vgname))
return lvd
def dataList(self):
return self.lvList
class FC4_LogVol(FC3_LogVol):
removedKeywords = FC3_LogVol.removedKeywords
removedAttrs = FC3_LogVol.removedAttrs
def _getParser(self):
op = FC3_LogVol._getParser(self)
op.add_option("--bytes-per-inode", dest="bytesPerInode", action="store",
type="int", nargs=1)
op.add_option("--fsoptions", dest="fsopts")
return op
class RHEL5_LogVol(FC4_LogVol):
removedKeywords = FC4_LogVol.removedKeywords
removedAttrs = FC4_LogVol.removedAttrs
def _getParser(self):
op = FC4_LogVol._getParser(self)
op.add_option("--encrypted", action="store_true", default=False)
op.add_option("--passphrase")
return op
class F9_LogVol(FC4_LogVol):
removedKeywords = FC4_LogVol.removedKeywords
removedAttrs = FC4_LogVol.removedAttrs
def _getParser(self):
op = FC4_LogVol._getParser(self)
op.add_option("--bytes-per-inode", deprecated=1)
op.add_option("--fsprofile", dest="fsprofile", action="store",
type="string", nargs=1)
op.add_option("--encrypted", action="store_true", default=False)
op.add_option("--passphrase")
return op
class F12_LogVol(F9_LogVol):
removedKeywords = F9_LogVol.removedKeywords
removedAttrs = F9_LogVol.removedAttrs
def _getParser(self):
op = F9_LogVol._getParser(self)
op.add_option("--escrowcert")
op.add_option("--backuppassphrase", action="store_true", default=False)
return op
class F14_LogVol(F12_LogVol):
removedKeywords = F12_LogVol.removedKeywords
removedAttrs = F12_LogVol.removedAttrs
def _getParser(self):
op = F12_LogVol._getParser(self)
op.remove_option("--bytes-per-inode")
return op
class F15_LogVol(F14_LogVol):
removedKeywords = F14_LogVol.removedKeywords
removedAttrs = F14_LogVol.removedAttrs
def _getParser(self):
op = F14_LogVol._getParser(self)
op.add_option("--label")
return op
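# An illustrative sketch (not part of the upstream module): the data classes
# can be built directly and serialized without a parser, which is handy when
# generating kickstart fragments.  All values below are made up.
if __name__ == "__main__":
    lv = F15_LogVolData(mountpoint="/home", name="home", vgname="vg00",
                        size=8192, fstype="ext4", label="HOME")
    print lv.__str__()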
########NEW FILE########
__FILENAME__ = mediacheck
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC4_MediaCheck(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.mediacheck = kwargs.get("mediacheck", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.mediacheck:
retval += "mediacheck\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "mediacheck")
self.mediacheck = True
return self
########NEW FILE########
__FILENAME__ = method
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007, 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_Method(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.method = kwargs.get("method", "")
# Set all these attributes so calls to this command's __call__
        # method can set them. However, we don't want to provide them as
# arguments to __init__ because method is special.
self.biospart = None
self.partition = None
self.server = None
self.dir = None
self.url = None
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.method == "cdrom":
retval += "# Use CDROM installation media\ncdrom\n"
elif self.method == "harddrive":
msg = "# Use hard drive installation media\nharddrive --dir=%s" % self.dir
if self.biospart is not None:
retval += msg + " --biospart=%s\n" % self.biospart
else:
retval += msg + " --partition=%s\n" % self.partition
elif self.method == "nfs":
retval += "# Use NFS installation media\nnfs --server=%s --dir=%s\n" % (self.server, self.dir)
elif self.method == "url":
retval += "# Use network installation\nurl --url=\"%s\"\n" % self.url
return retval
def _getParser(self):
op = KSOptionParser()
# method = "cdrom" falls through to the return
if self.currentCmd == "harddrive":
op.add_option("--biospart", dest="biospart")
op.add_option("--partition", dest="partition")
op.add_option("--dir", dest="dir", required=1)
elif self.currentCmd == "nfs":
op.add_option("--server", dest="server", required=1)
op.add_option("--dir", dest="dir", required=1)
elif self.currentCmd == "url":
op.add_option("--url", dest="url", required=1)
return op
def parse(self, args):
self.method = self.currentCmd
op = self._getParser()
(opts, extra) = op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(op, opts)
if self.currentCmd == "harddrive":
            if (self.biospart is None and self.partition is None) or \
               (self.biospart is not None and self.partition is not None):
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("One of biospart or partition options must be specified."))
return self
class FC6_Method(FC3_Method):
removedKeywords = FC3_Method.removedKeywords
removedAttrs = FC3_Method.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_Method.__init__(self, writePriority, *args, **kwargs)
# Same reason for this attribute as the comment in FC3_Method.
self.opts = None
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.method == "cdrom":
retval += "# Use CDROM installation media\ncdrom\n"
elif self.method == "harddrive":
msg = "# Use hard drive installation media\nharddrive --dir=%s" % self.dir
if self.biospart is not None:
retval += msg + " --biospart=%s\n" % self.biospart
else:
retval += msg + " --partition=%s\n" % self.partition
elif self.method == "nfs":
retval += "# Use NFS installation media\nnfs --server=%s --dir=%s" % (self.server, self.dir)
if self.opts is not None:
retval += " --opts=\"%s\"" % self.opts
retval += "\n"
elif self.method == "url":
retval += "# Use network installation\nurl --url=\"%s\"\n" % self.url
return retval
def _getParser(self):
op = FC3_Method._getParser(self)
if self.currentCmd == "nfs":
op.add_option("--opts", dest="opts")
return op
class F13_Method(FC6_Method):
removedKeywords = FC6_Method.removedKeywords
removedAttrs = FC6_Method.removedAttrs
def __init__(self, *args, **kwargs):
FC6_Method.__init__(self, *args, **kwargs)
# And same as all the other __init__ methods.
self.proxy = ""
def __str__(self):
retval = FC6_Method.__str__(self)
if self.method == "url" and self.proxy:
retval = retval.strip()
retval += " --proxy=\"%s\"\n" % self.proxy
return retval
def _getParser(self):
op = FC6_Method._getParser(self)
if self.currentCmd == "url":
op.add_option("--proxy")
return op
class F14_Method(F13_Method):
removedKeywords = F13_Method.removedKeywords
removedAttrs = F13_Method.removedAttrs
def __init__(self, *args, **kwargs):
F13_Method.__init__(self, *args, **kwargs)
self.noverifyssl = False
def __str__(self):
retval = F13_Method.__str__(self)
if self.method == "url" and self.noverifyssl:
retval = retval.strip()
retval += " --noverifyssl\n"
return retval
def _getParser(self):
op = F13_Method._getParser(self)
if self.currentCmd == "url":
op.add_option("--noverifyssl", action="store_true", default=False)
return op
RHEL6_Method = F14_Method
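# An illustrative sketch (not part of the upstream module): the method
# command is dispatched under several names (cdrom, harddrive, nfs, url), so
# currentCmd must be set before parse() is called.  Server and path are made
# up.
if __name__ == "__main__":
    m = F14_Method()
    m.currentCmd = "nfs"
    m.parse(["--server=nfs.example.com", "--dir=/exports/f14"])
    print m.__str__()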
########NEW FILE########
__FILENAME__ = monitor
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_Monitor(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.hsync = kwargs.get("hsync", "")
self.monitor = kwargs.get("monitor", "")
self.vsync = kwargs.get("vsync", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
retval += "monitor"
if self.hsync != "":
retval += " --hsync=%s" % self.hsync
if self.monitor != "":
retval += " --monitor=\"%s\"" % self.monitor
if self.vsync != "":
retval += " --vsync=%s" % self.vsync
if retval != "monitor":
return retval + "\n"
else:
return ""
def _getParser(self):
op = KSOptionParser()
op.add_option("--hsync")
op.add_option("--monitor")
op.add_option("--vsync")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if extra:
mapping = {"cmd": "monitor", "options": extra}
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(cmd)s command: %(options)s") % mapping)
self._setToSelf(self.op, opts)
return self
class FC6_Monitor(FC3_Monitor):
removedKeywords = FC3_Monitor.removedKeywords
removedAttrs = FC3_Monitor.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_Monitor.__init__(self, writePriority, *args, **kwargs)
self.probe = kwargs.get("probe", True)
def __str__(self):
retval = KickstartCommand.__str__(self)
retval += "monitor"
if self.hsync != "":
retval += " --hsync=%s" % self.hsync
if self.monitor != "":
retval += " --monitor=\"%s\"" % self.monitor
if not self.probe:
retval += " --noprobe"
if self.vsync != "":
retval += " --vsync=%s" % self.vsync
if retval != "monitor":
return retval + "\n"
else:
return ""
def _getParser(self):
op = FC3_Monitor._getParser(self)
op.add_option("--noprobe", dest="probe", action="store_false",
default=True)
return op
class F10_Monitor(DeprecatedCommand):
def __init__(self):
DeprecatedCommand.__init__(self)
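# An illustrative sketch (not part of the upstream module): FC6_Monitor
# emits nothing when no option was given, because __str__ compares the
# accumulated string against the bare "monitor" prefix.  The hsync range is
# made up.
if __name__ == "__main__":
    mon = FC6_Monitor()
    print repr(mon.__str__())  # -> ''
    mon.hsync = "31.5-48.5"
    print mon.__str__()        # -> monitor --hsync=31.5-48.5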
########NEW FILE########
__FILENAME__ = mouse
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class RHEL3_Mouse(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.device = kwargs.get("device", "")
self.emulthree = kwargs.get("emulthree", False)
self.mouse = kwargs.get("mouse", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
opts = ""
if self.device:
opts += "--device=%s " % self.device
if self.emulthree:
opts += "--emulthree "
if self.mouse:
retval += "# System mouse\nmouse %s%s\n" % (opts, self.mouse)
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--device", dest="device", default="")
op.add_option("--emulthree", dest="emulthree", default=False, action="store_true")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "mouse")
self.mouse = extra[0]
return self
class FC3_Mouse(DeprecatedCommand):
def __init__(self):
DeprecatedCommand.__init__(self)
########NEW FILE########
__FILENAME__ = multipath
#
# Chris Lumens <clumens@redhat.com>
# Peter Jones <pjones@redhat.com>
#
# Copyright 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC6_MpPathData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.mpdev = kwargs.get("mpdev", "")
self.device = kwargs.get("device", "")
self.rule = kwargs.get("rule", "")
def __str__(self):
return " --device=%s --rule=\"%s\"" % (self.device, self.rule)
class FC6_MultiPathData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.paths = kwargs.get("paths", [])
def __str__(self):
retval = BaseData.__str__(self)
for path in self.paths:
retval += "multipath --mpdev=%s %s\n" % (self.name, path.__str__())
return retval
class FC6_MultiPath(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=50, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.mpaths = kwargs.get("mpaths", [])
def __str__(self):
retval = ""
for mpath in self.mpaths:
retval += mpath.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--name", dest="name", action="store", type="string",
required=1)
op.add_option("--device", dest="device", action="store", type="string",
required=1)
op.add_option("--rule", dest="rule", action="store", type="string",
required=1)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
dd = FC6_MpPathData()
self._setToObj(self.op, opts, dd)
dd.lineno = self.lineno
dd.mpdev = dd.mpdev.split('/')[-1]
parent = None
for x in range(0, len(self.mpaths)):
mpath = self.mpaths[x]
for path in mpath.paths:
if path.device == dd.device:
mapping = {"device": path.device, "multipathdev": path.mpdev}
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Device '%(device)s' is already used in multipath '%(multipathdev)s'") % mapping)
if mpath.name == dd.mpdev:
parent = x
if parent is None:
mpath = FC6_MultiPathData()
return mpath
else:
mpath = self.mpaths[parent]
return dd
def dataList(self):
return self.mpaths
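# An illustrative sketch (not part of the upstream module): a multipath
# entry serializes as one "multipath" line per path.  All names below are
# made up.
if __name__ == "__main__":
    path = FC6_MpPathData(mpdev="mpath0", device="sdb", rule="failover 0 1")
    mp = FC6_MultiPathData(name="mpath0", paths=[path])
    print mp.__str__()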
########NEW FILE########
__FILENAME__ = network
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_NetworkData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.bootProto = kwargs.get("bootProto", BOOTPROTO_DHCP)
self.dhcpclass = kwargs.get("dhcpclass", "")
self.device = kwargs.get("device", "")
self.essid = kwargs.get("essid", "")
self.ethtool = kwargs.get("ethtool", "")
self.gateway = kwargs.get("gateway", "")
self.hostname = kwargs.get("hostname", "")
self.ip = kwargs.get("ip", "")
self.mtu = kwargs.get("mtu", "")
self.nameserver = kwargs.get("nameserver", "")
self.netmask = kwargs.get("netmask", "")
self.nodns = kwargs.get("nodns", False)
self.onboot = kwargs.get("onboot", True)
self.wepkey = kwargs.get("wepkey", "")
def __eq__(self, y):
return self.device and self.device == y.device
def _getArgsAsStr(self):
retval = ""
if self.bootProto != "":
retval += " --bootproto=%s" % self.bootProto
if self.dhcpclass != "":
retval += " --dhcpclass=%s" % self.dhcpclass
if self.device != "":
retval += " --device=%s" % self.device
if self.essid != "":
retval += " --essid=\"%s\"" % self.essid
if self.ethtool != "":
retval += " --ethtool=\"%s\"" % self.ethtool
if self.gateway != "":
retval += " --gateway=%s" % self.gateway
if self.hostname != "":
retval += " --hostname=%s" % self.hostname
if self.ip != "":
retval += " --ip=%s" % self.ip
if self.mtu != "":
retval += " --mtu=%s" % self.mtu
if self.nameserver != "":
retval += " --nameserver=%s" % self.nameserver
if self.netmask != "":
retval += " --netmask=%s" % self.netmask
if self.nodns:
retval += " --nodns"
if not self.onboot:
retval += " --onboot=off"
if self.wepkey != "":
retval += " --wepkey=%s" % self.wepkey
return retval
def __str__(self):
retval = BaseData.__str__(self)
retval += "network %s\n" % self._getArgsAsStr()
return retval
class FC4_NetworkData(FC3_NetworkData):
removedKeywords = FC3_NetworkData.removedKeywords
removedAttrs = FC3_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
FC3_NetworkData.__init__(self, *args, **kwargs)
self.notksdevice = kwargs.get("notksdevice", False)
def _getArgsAsStr(self):
retval = FC3_NetworkData._getArgsAsStr(self)
if self.notksdevice:
retval += " --notksdevice"
return retval
class FC6_NetworkData(FC4_NetworkData):
removedKeywords = FC4_NetworkData.removedKeywords
removedAttrs = FC4_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
FC4_NetworkData.__init__(self, *args, **kwargs)
self.noipv4 = kwargs.get("noipv4", False)
self.noipv6 = kwargs.get("noipv6", False)
def _getArgsAsStr(self):
retval = FC4_NetworkData._getArgsAsStr(self)
if self.noipv4:
retval += " --noipv4"
if self.noipv6:
retval += " --noipv6"
return retval
class F8_NetworkData(FC6_NetworkData):
removedKeywords = FC6_NetworkData.removedKeywords
removedAttrs = FC6_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
FC6_NetworkData.__init__(self, *args, **kwargs)
self.ipv6 = kwargs.get("ipv6", "")
def _getArgsAsStr(self):
retval = FC6_NetworkData._getArgsAsStr(self)
if self.ipv6 != "":
retval += " --ipv6" % self.ipv6
return retval
class F16_NetworkData(F8_NetworkData):
removedKeywords = F8_NetworkData.removedKeywords
removedAttrs = F8_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F8_NetworkData.__init__(self, *args, **kwargs)
self.activate = kwargs.get("activate", False)
self.nodefroute = kwargs.get("nodefroute", False)
self.wpakey = kwargs.get("wpakey", "")
def _getArgsAsStr(self):
retval = F8_NetworkData._getArgsAsStr(self)
if self.activate:
retval += " --activate"
if self.nodefroute:
retval += " --nodefroute"
if self.wpakey != "":
retval += "--wpakey=%s" % self.wpakey
return retval
class RHEL4_NetworkData(FC3_NetworkData):
removedKeywords = FC3_NetworkData.removedKeywords
removedAttrs = FC3_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
FC3_NetworkData.__init__(self, *args, **kwargs)
self.notksdevice = kwargs.get("notksdevice", False)
def _getArgsAsStr(self):
retval = FC3_NetworkData._getArgsAsStr(self)
if self.notksdevice:
retval += " --notksdevice"
return retval
class RHEL6_NetworkData(F8_NetworkData):
removedKeywords = F8_NetworkData.removedKeywords
removedAttrs = F8_NetworkData.removedAttrs
def __init__(self, *args, **kwargs):
F8_NetworkData.__init__(self, *args, **kwargs)
self.activate = kwargs.get("activate", False)
self.nodefroute = kwargs.get("nodefroute", False)
def _getArgsAsStr(self):
retval = F8_NetworkData._getArgsAsStr(self)
if self.activate:
retval += " --activate"
if self.nodefroute:
retval += " --nodefroute"
return retval
class FC3_Network(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList = [BOOTPROTO_DHCP, BOOTPROTO_BOOTP,
BOOTPROTO_STATIC]
self.op = self._getParser()
self.network = kwargs.get("network", [])
def __str__(self):
retval = ""
for nic in self.network:
retval += nic.__str__()
if retval != "":
return "# Network information\n" + retval
else:
return ""
def _getParser(self):
op = KSOptionParser()
op.add_option("--bootproto", dest="bootProto",
default=BOOTPROTO_DHCP,
choices=self.bootprotoList)
op.add_option("--dhcpclass", dest="dhcpclass")
op.add_option("--device", dest="device")
op.add_option("--essid", dest="essid")
op.add_option("--ethtool", dest="ethtool")
op.add_option("--gateway", dest="gateway")
op.add_option("--hostname", dest="hostname")
op.add_option("--ip", dest="ip")
op.add_option("--mtu", dest="mtu")
op.add_option("--nameserver", dest="nameserver")
op.add_option("--netmask", dest="netmask")
op.add_option("--nodns", dest="nodns", action="store_true",
default=False)
op.add_option("--onboot", dest="onboot", action="store",
type="ksboolean")
op.add_option("--wepkey", dest="wepkey")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
nd = self.handler.NetworkData()
self._setToObj(self.op, opts, nd)
nd.lineno = self.lineno
# Check for duplicates in the data list.
if nd in self.dataList():
warnings.warn(_("A network device with the name %s has already been defined.") % nd.device)
return nd
def dataList(self):
return self.network
class FC4_Network(FC3_Network):
removedKeywords = FC3_Network.removedKeywords
removedAttrs = FC3_Network.removedAttrs
def _getParser(self):
op = FC3_Network._getParser(self)
op.add_option("--notksdevice", dest="notksdevice", action="store_true",
default=False)
return op
class FC6_Network(FC4_Network):
removedKeywords = FC4_Network.removedKeywords
removedAttrs = FC4_Network.removedAttrs
def _getParser(self):
op = FC4_Network._getParser(self)
op.add_option("--noipv4", dest="noipv4", action="store_true",
default=False)
op.add_option("--noipv6", dest="noipv6", action="store_true",
default=False)
return op
class F8_Network(FC6_Network):
removedKeywords = FC6_Network.removedKeywords
removedAttrs = FC6_Network.removedAttrs
def _getParser(self):
op = FC6_Network._getParser(self)
op.add_option("--ipv6", dest="ipv6")
return op
class F9_Network(F8_Network):
removedKeywords = F8_Network.removedKeywords
removedAttrs = F8_Network.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F8_Network.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList.append(BOOTPROTO_QUERY)
def _getParser(self):
op = F8_Network._getParser(self)
op.add_option("--bootproto", dest="bootProto",
default=BOOTPROTO_DHCP,
choices=self.bootprotoList)
return op
class F16_Network(F9_Network):
removedKeywords = F9_Network.removedKeywords
removedAttrs = F9_Network.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F9_Network.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList.append(BOOTPROTO_IBFT)
def _getParser(self):
op = F9_Network._getParser(self)
op.add_option("--activate", dest="activate", action="store_true",
default=False)
op.add_option("--nodefroute", dest="nodefroute", action="store_true",
default=False)
op.add_option("--wpakey", dest="wpakey", action="store", default="")
return op
class RHEL4_Network(FC3_Network):
removedKeywords = FC3_Network.removedKeywords
removedAttrs = FC3_Network.removedAttrs
def _getParser(self):
op = FC3_Network._getParser(self)
op.add_option("--notksdevice", dest="notksdevice", action="store_true",
default=False)
return op
class RHEL5_Network(FC6_Network):
removedKeywords = FC6_Network.removedKeywords
removedAttrs = FC6_Network.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC6_Network.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList.append(BOOTPROTO_QUERY)
def _getParser(self):
op = FC6_Network._getParser(self)
op.add_option("--bootproto", dest="bootProto",
default=BOOTPROTO_DHCP,
choices=self.bootprotoList)
return op
class RHEL6_Network(F9_Network):
removedKeywords = F9_Network.removedKeywords
removedAttrs = F9_Network.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
F9_Network.__init__(self, writePriority, *args, **kwargs)
self.bootprotoList.append(BOOTPROTO_IBFT)
def _getParser(self):
op = F9_Network._getParser(self)
op.add_option("--activate", dest="activate", action="store_true",
default=False)
op.add_option("--nodefroute", dest="nodefroute", action="store_true",
default=False)
return op
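# An illustrative sketch (not part of the upstream module): a static
# configuration built directly on the data class; the addresses are made up.
# The BOOTPROTO_* constants come from pykickstart.constants, imported above.
if __name__ == "__main__":
    nd = F16_NetworkData(device="eth0", bootProto=BOOTPROTO_STATIC,
                         ip="192.168.1.10", netmask="255.255.255.0",
                         gateway="192.168.1.1", nameserver="192.168.1.1",
                         activate=True)
    print nd.__str__()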
########NEW FILE########
__FILENAME__ = partition
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_PartData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.active = kwargs.get("active", False)
self.primOnly = kwargs.get("primOnly", False)
self.end = kwargs.get("end", 0)
self.fstype = kwargs.get("fstype", "")
self.grow = kwargs.get("grow", False)
self.maxSizeMB = kwargs.get("maxSizeMB", 0)
self.format = kwargs.get("format", True)
self.onbiosdisk = kwargs.get("onbiosdisk", "")
self.disk = kwargs.get("disk", "")
self.onPart = kwargs.get("onPart", "")
self.recommended = kwargs.get("recommended", False)
self.size = kwargs.get("size", None)
self.start = kwargs.get("start", 0)
self.mountpoint = kwargs.get("mountpoint", "")
def __eq__(self, y):
if self.mountpoint:
return self.mountpoint == y.mountpoint
else:
return False
def _getArgsAsStr(self):
retval = ""
if self.active:
retval += " --active"
if self.primOnly:
retval += " --asprimary"
if hasattr(self, "end") and self.end != 0:
retval += " --end=%s" % self.end
if self.fstype != "":
retval += " --fstype=\"%s\"" % self.fstype
if self.grow:
retval += " --grow"
if self.maxSizeMB > 0:
retval += " --maxsize=%d" % self.maxSizeMB
if not self.format:
retval += " --noformat"
if self.onbiosdisk != "":
retval += " --onbiosdisk=%s" % self.onbiosdisk
if self.disk != "":
retval += " --ondisk=%s" % self.disk
if self.onPart != "":
retval += " --onpart=%s" % self.onPart
if self.recommended:
retval += " --recommended"
if self.size and self.size != 0:
retval += " --size=%s" % self.size
if hasattr(self, "start") and self.start != 0:
retval += " --start=%s" % self.start
return retval
def __str__(self):
retval = BaseData.__str__(self)
if self.mountpoint:
mountpoint_str = "%s" % self.mountpoint
else:
mountpoint_str = "(No mount point)"
retval += "part %s%s\n" % (mountpoint_str, self._getArgsAsStr())
return retval
class FC4_PartData(FC3_PartData):
removedKeywords = FC3_PartData.removedKeywords
removedAttrs = FC3_PartData.removedAttrs
def __init__(self, *args, **kwargs):
FC3_PartData.__init__(self, *args, **kwargs)
self.bytesPerInode = kwargs.get("bytesPerInode", 4096)
self.fsopts = kwargs.get("fsopts", "")
self.label = kwargs.get("label", "")
def _getArgsAsStr(self):
retval = FC3_PartData._getArgsAsStr(self)
if hasattr(self, "bytesPerInode") and self.bytesPerInode != 0:
retval += " --bytes-per-inode=%d" % self.bytesPerInode
if self.fsopts != "":
retval += " --fsoptions=\"%s\"" % self.fsopts
if self.label != "":
retval += " --label=%s" % self.label
return retval
class RHEL5_PartData(FC4_PartData):
removedKeywords = FC4_PartData.removedKeywords
removedAttrs = FC4_PartData.removedAttrs
def __init__(self, *args, **kwargs):
FC4_PartData.__init__(self, *args, **kwargs)
self.encrypted = kwargs.get("encrypted", False)
self.passphrase = kwargs.get("passphrase", "")
def _getArgsAsStr(self):
retval = FC4_PartData._getArgsAsStr(self)
if self.encrypted:
retval += " --encrypted"
if self.passphrase != "":
retval += " --passphrase=\"%s\"" % self.passphrase
return retval
class F9_PartData(FC4_PartData):
removedKeywords = FC4_PartData.removedKeywords + ["bytesPerInode"]
removedAttrs = FC4_PartData.removedAttrs + ["bytesPerInode"]
def __init__(self, *args, **kwargs):
FC4_PartData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.fsopts = kwargs.get("fsopts", "")
self.label = kwargs.get("label", "")
self.fsprofile = kwargs.get("fsprofile", "")
self.encrypted = kwargs.get("encrypted", False)
self.passphrase = kwargs.get("passphrase", "")
def _getArgsAsStr(self):
retval = FC4_PartData._getArgsAsStr(self)
if self.fsprofile != "":
retval += " --fsprofile=\"%s\"" % self.fsprofile
if self.encrypted:
retval += " --encrypted"
if self.passphrase != "":
retval += " --passphrase=\"%s\"" % self.passphrase
return retval
class F11_PartData(F9_PartData):
removedKeywords = F9_PartData.removedKeywords + ["start", "end"]
removedAttrs = F9_PartData.removedAttrs + ["start", "end"]
class F12_PartData(F11_PartData):
removedKeywords = F11_PartData.removedKeywords
removedAttrs = F11_PartData.removedAttrs
def __init__(self, *args, **kwargs):
F11_PartData.__init__(self, *args, **kwargs)
self.escrowcert = kwargs.get("escrowcert", "")
self.backuppassphrase = kwargs.get("backuppassphrase", False)
def _getArgsAsStr(self):
retval = F11_PartData._getArgsAsStr(self)
if self.encrypted and self.escrowcert != "":
retval += " --escrowcert=\"%s\"" % self.escrowcert
if self.backuppassphrase:
retval += " --backuppassphrase"
return retval
F14_PartData = F12_PartData
class FC3_Partition(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=130, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.partitions = kwargs.get("partitions", [])
def __str__(self):
retval = ""
for part in self.partitions:
retval += part.__str__()
if retval != "":
return "# Disk partitioning information\n" + retval
else:
return ""
def _getParser(self):
def part_cb (option, opt_str, value, parser):
if value.startswith("/dev/"):
parser.values.ensure_value(option.dest, value[5:])
else:
parser.values.ensure_value(option.dest, value)
op = KSOptionParser()
op.add_option("--active", dest="active", action="store_true",
default=False)
op.add_option("--asprimary", dest="primOnly", action="store_true",
default=False)
op.add_option("--end", dest="end", action="store", type="int",
nargs=1)
op.add_option("--fstype", "--type", dest="fstype")
op.add_option("--grow", dest="grow", action="store_true", default=False)
op.add_option("--maxsize", dest="maxSizeMB", action="store", type="int",
nargs=1)
op.add_option("--noformat", dest="format", action="store_false",
default=True)
op.add_option("--onbiosdisk", dest="onbiosdisk")
op.add_option("--ondisk", "--ondrive", dest="disk")
op.add_option("--onpart", "--usepart", dest="onPart", action="callback",
callback=part_cb, nargs=1, type="string")
op.add_option("--recommended", dest="recommended", action="store_true",
default=False)
op.add_option("--size", dest="size", action="store", type="int",
nargs=1)
op.add_option("--start", dest="start", action="store", type="int",
nargs=1)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
pd = self.handler.PartData()
self._setToObj(self.op, opts, pd)
pd.lineno = self.lineno
if extra:
pd.mountpoint = extra[0]
if pd in self.dataList():
warnings.warn(_("A partition with the mountpoint %s has already been defined.") % pd.mountpoint)
else:
pd.mountpoint = None
return pd
def dataList(self):
return self.partitions
class FC4_Partition(FC3_Partition):
removedKeywords = FC3_Partition.removedKeywords
removedAttrs = FC3_Partition.removedAttrs
def __init__(self, writePriority=130, *args, **kwargs):
FC3_Partition.__init__(self, writePriority, *args, **kwargs)
def part_cb (option, opt_str, value, parser):
if value.startswith("/dev/"):
parser.values.ensure_value(option.dest, value[5:])
else:
parser.values.ensure_value(option.dest, value)
def _getParser(self):
op = FC3_Partition._getParser(self)
op.add_option("--bytes-per-inode", dest="bytesPerInode", action="store",
type="int", nargs=1)
op.add_option("--fsoptions", dest="fsopts")
op.add_option("--label", dest="label")
return op
class RHEL5_Partition(FC4_Partition):
removedKeywords = FC4_Partition.removedKeywords
removedAttrs = FC4_Partition.removedAttrs
def __init__(self, writePriority=130, *args, **kwargs):
FC4_Partition.__init__(self, writePriority, *args, **kwargs)
def part_cb (option, opt_str, value, parser):
if value.startswith("/dev/"):
parser.values.ensure_value(option.dest, value[5:])
else:
parser.values.ensure_value(option.dest, value)
def _getParser(self):
op = FC4_Partition._getParser(self)
op.add_option("--encrypted", action="store_true", default=False)
op.add_option("--passphrase")
return op
class F9_Partition(FC4_Partition):
removedKeywords = FC4_Partition.removedKeywords
removedAttrs = FC4_Partition.removedAttrs
def __init__(self, writePriority=130, *args, **kwargs):
FC4_Partition.__init__(self, writePriority, *args, **kwargs)
def part_cb (option, opt_str, value, parser):
if value.startswith("/dev/"):
parser.values.ensure_value(option.dest, value[5:])
else:
parser.values.ensure_value(option.dest, value)
def _getParser(self):
op = FC4_Partition._getParser(self)
op.add_option("--bytes-per-inode", deprecated=1)
op.add_option("--fsprofile")
op.add_option("--encrypted", action="store_true", default=False)
op.add_option("--passphrase")
return op
class F11_Partition(F9_Partition):
removedKeywords = F9_Partition.removedKeywords
removedAttrs = F9_Partition.removedAttrs
def _getParser(self):
op = F9_Partition._getParser(self)
op.add_option("--start", deprecated=1)
op.add_option("--end", deprecated=1)
return op
class F12_Partition(F11_Partition):
removedKeywords = F11_Partition.removedKeywords
removedAttrs = F11_Partition.removedAttrs
def _getParser(self):
op = F11_Partition._getParser(self)
op.add_option("--escrowcert")
op.add_option("--backuppassphrase", action="store_true", default=False)
return op
class F14_Partition(F12_Partition):
removedKeywords = F12_Partition.removedKeywords
removedAttrs = F12_Partition.removedAttrs
def _getParser(self):
op = F12_Partition._getParser(self)
op.remove_option("--bytes-per-inode")
op.remove_option("--start")
op.remove_option("--end")
return op
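# An illustrative sketch (not part of the upstream module): a partition
# entry built straight from the data class; the size and disk name are made
# up.
if __name__ == "__main__":
    pd = F14_PartData(mountpoint="/boot", fstype="ext4", size=500, disk="sda")
    print pd.__str__()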
########NEW FILE########
__FILENAME__ = raid
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008, 2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_RaidData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.device = kwargs.get("device", None)
self.fstype = kwargs.get("fstype", "")
self.level = kwargs.get("level", "")
self.format = kwargs.get("format", True)
self.spares = kwargs.get("spares", 0)
self.preexist = kwargs.get("preexist", False)
self.mountpoint = kwargs.get("mountpoint", "")
self.members = kwargs.get("members", [])
def __eq__(self, y):
return self.device == y.device
def _getArgsAsStr(self):
retval = ""
if self.device != "":
retval += " --device=%s" % self.device
if self.fstype != "":
retval += " --fstype=\"%s\"" % self.fstype
if self.level != "":
retval += " --level=%s" % self.level
if not self.format:
retval += " --noformat"
if self.spares != 0:
retval += " --spares=%d" % self.spares
if self.preexist:
retval += " --useexisting"
return retval
def __str__(self):
retval = BaseData.__str__(self)
retval += "raid %s%s %s\n" % (self.mountpoint, self._getArgsAsStr(),
" ".join(self.members))
return retval
class FC4_RaidData(FC3_RaidData):
removedKeywords = FC3_RaidData.removedKeywords
removedAttrs = FC3_RaidData.removedAttrs
def __init__(self, *args, **kwargs):
FC3_RaidData.__init__(self, *args, **kwargs)
self.fsopts = kwargs.get("fsopts", "")
def _getArgsAsStr(self):
retval = FC3_RaidData._getArgsAsStr(self)
if self.fsopts != "":
retval += " --fsoptions=\"%s\"" % self.fsopts
return retval
class FC5_RaidData(FC4_RaidData):
removedKeywords = FC4_RaidData.removedKeywords
removedAttrs = FC4_RaidData.removedAttrs
def __init__(self, *args, **kwargs):
FC4_RaidData.__init__(self, *args, **kwargs)
self.bytesPerInode = kwargs.get("bytesPerInode", 4096)
def _getArgsAsStr(self):
retval = FC4_RaidData._getArgsAsStr(self)
if hasattr(self, "bytesPerInode") and self.bytesPerInode != 0:
retval += " --bytes-per-inode=%d" % self.bytesPerInode
return retval
class RHEL5_RaidData(FC5_RaidData):
removedKeywords = FC5_RaidData.removedKeywords
removedAttrs = FC5_RaidData.removedAttrs
def __init__(self, *args, **kwargs):
FC5_RaidData.__init__(self, *args, **kwargs)
self.encrypted = kwargs.get("encrypted", False)
self.passphrase = kwargs.get("passphrase", "")
def _getArgsAsStr(self):
retval = FC5_RaidData._getArgsAsStr(self)
if self.encrypted:
retval += " --encrypted"
if self.passphrase != "":
retval += " --passphrase=\"%s\"" % self.passphrase
return retval
F7_RaidData = FC5_RaidData
class F9_RaidData(FC5_RaidData):
removedKeywords = FC5_RaidData.removedKeywords + ["bytesPerInode"]
removedAttrs = FC5_RaidData.removedAttrs + ["bytesPerInode"]
def __init__(self, *args, **kwargs):
FC5_RaidData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.fsprofile = kwargs.get("fsprofile", "")
self.encrypted = kwargs.get("encrypted", False)
self.passphrase = kwargs.get("passphrase", "")
def _getArgsAsStr(self):
retval = FC5_RaidData._getArgsAsStr(self)
if self.fsprofile != "":
retval += " --fsprofile=\"%s\"" % self.fsprofile
if self.encrypted:
retval += " --encrypted"
if self.passphrase != "":
retval += " --passphrase=\"%s\"" % self.passphrase
return retval
class F12_RaidData(F9_RaidData):
removedKeywords = F9_RaidData.removedKeywords
removedAttrs = F9_RaidData.removedAttrs
def __init__(self, *args, **kwargs):
F9_RaidData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.escrowcert = kwargs.get("escrowcert", "")
self.backuppassphrase = kwargs.get("backuppassphrase", False)
def _getArgsAsStr(self):
retval = F9_RaidData._getArgsAsStr(self)
if self.encrypted and self.escrowcert != "":
retval += " --escrowcert=\"%s\"" % self.escrowcert
if self.backuppassphrase:
retval += " --backuppassphrase"
return retval
F13_RaidData = F12_RaidData
F14_RaidData = F13_RaidData
class F15_RaidData(F14_RaidData):
removedKeywords = F14_RaidData.removedKeywords
removedAttrs = F14_RaidData.removedAttrs
def __init__(self, *args, **kwargs):
F14_RaidData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.label = kwargs.get("label", "")
def _getArgsAsStr(self):
retval = F14_RaidData._getArgsAsStr(self)
if self.label != "":
retval += " --label=%s" % self.label
return retval
class FC3_Raid(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=131, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
# A dict of all the RAID levels we support. This means that if we
# support more levels in the future, subclasses don't have to
# duplicate too much.
self.levelMap = { "RAID0": "RAID0", "0": "RAID0",
"RAID1": "RAID1", "1": "RAID1",
"RAID5": "RAID5", "5": "RAID5",
"RAID6": "RAID6", "6": "RAID6" }
self.raidList = kwargs.get("raidList", [])
def __str__(self):
retval = ""
for raid in self.raidList:
retval += raid.__str__()
return retval
def _getParser(self):
def raid_cb (option, opt_str, value, parser):
parser.values.format = False
parser.values.preexist = True
def device_cb (option, opt_str, value, parser):
if value[0:2] == "md":
parser.values.ensure_value(option.dest, value[2:])
else:
parser.values.ensure_value(option.dest, value)
def level_cb (option, opt_str, value, parser):
if value in self.levelMap:
parser.values.ensure_value(option.dest, self.levelMap[value])
op = KSOptionParser()
op.add_option("--device", action="callback", callback=device_cb,
dest="device", type="string", nargs=1, required=1)
op.add_option("--fstype", dest="fstype")
op.add_option("--level", dest="level", action="callback",
callback=level_cb, type="string", nargs=1)
op.add_option("--noformat", action="callback", callback=raid_cb,
dest="format", default=True, nargs=0)
op.add_option("--spares", dest="spares", action="store", type="int",
nargs=1, default=0)
op.add_option("--useexisting", dest="preexist", action="store_true",
default=False)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) == 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Mount point required for %s") % "raid")
if len(extra) == 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Partitions required for %s") % "raid")
rd = self.handler.RaidData()
self._setToObj(self.op, opts, rd)
rd.lineno = self.lineno
# --device can't just take an int in the callback above, because it
# could be specified as "mdX", which causes optparse to error when
# it runs int().
rd.device = int(rd.device)
rd.mountpoint = extra[0]
rd.members = extra[1:]
# Check for duplicates in the data list.
if rd in self.dataList():
warnings.warn(_("A RAID device with the name %s has already been defined.") % rd.device)
return rd
def dataList(self):
return self.raidList
class FC4_Raid(FC3_Raid):
removedKeywords = FC3_Raid.removedKeywords
removedAttrs = FC3_Raid.removedAttrs
def _getParser(self):
op = FC3_Raid._getParser(self)
op.add_option("--fsoptions", dest="fsopts")
return op
class FC5_Raid(FC4_Raid):
removedKeywords = FC4_Raid.removedKeywords
removedAttrs = FC4_Raid.removedAttrs
def _getParser(self):
op = FC4_Raid._getParser(self)
op.add_option("--bytes-per-inode", dest="bytesPerInode", action="store",
type="int", nargs=1)
return op
class RHEL5_Raid(FC5_Raid):
removedKeywords = FC5_Raid.removedKeywords
removedAttrs = FC5_Raid.removedAttrs
def __init__(self, writePriority=131, *args, **kwargs):
FC5_Raid.__init__(self, writePriority, *args, **kwargs)
self.levelMap.update({"RAID10": "RAID10", "10": "RAID10"})
def _getParser(self):
op = FC5_Raid._getParser(self)
op.add_option("--encrypted", action="store_true", default=False)
op.add_option("--passphrase")
return op
class F7_Raid(FC5_Raid):
removedKeywords = FC5_Raid.removedKeywords
removedAttrs = FC5_Raid.removedAttrs
def __init__(self, writePriority=131, *args, **kwargs):
FC5_Raid.__init__(self, writePriority, *args, **kwargs)
self.levelMap.update({"RAID10": "RAID10", "10": "RAID10"})
class F9_Raid(F7_Raid):
removedKeywords = F7_Raid.removedKeywords
removedAttrs = F7_Raid.removedAttrs
def _getParser(self):
op = F7_Raid._getParser(self)
op.add_option("--bytes-per-inode", deprecated=1)
op.add_option("--fsprofile")
op.add_option("--encrypted", action="store_true", default=False)
op.add_option("--passphrase")
return op
class F12_Raid(F9_Raid):
removedKeywords = F9_Raid.removedKeywords
removedAttrs = F9_Raid.removedAttrs
def _getParser(self):
op = F9_Raid._getParser(self)
op.add_option("--escrowcert")
op.add_option("--backuppassphrase", action="store_true", default=False)
return op
class F13_Raid(F12_Raid):
removedKeywords = F12_Raid.removedKeywords
removedAttrs = F12_Raid.removedAttrs
def __init__(self, writePriority=131, *args, **kwargs):
F12_Raid.__init__(self, writePriority, *args, **kwargs)
self.levelMap.update({"RAID4": "RAID4", "4": "RAID4"})
class F14_Raid(F13_Raid):
removedKeywords = F13_Raid.removedKeywords
removedAttrs = F13_Raid.removedAttrs
def _getParser(self):
op = F13_Raid._getParser(self)
op.remove_option("--bytes-per-inode")
return op
class F15_Raid(F14_Raid):
removedKeywords = F14_Raid.removedKeywords
removedAttrs = F14_Raid.removedAttrs
def _getParser(self):
op = F14_Raid._getParser(self)
op.add_option("--label")
return op
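# Editor's sketch (not part of upstream pykickstart): how a raid line
# round-trips through FC3_Raid/FC3_RaidData.  device_cb strips the "md"
# prefix and parse() casts the remainder to int, so
#
#     raid / --device=md0 --level=RAID1 raid.01 raid.02
#
# parses to data that serializes back the same way:
#
#     rd = FC3_RaidData(device=0, level="RAID1", mountpoint="/",
#                       members=["raid.01", "raid.02"])
#     print rd    # -> raid / --device=0 --level=RAID1 raid.01 raid.02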
########NEW FILE########
__FILENAME__ = reboot
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.errors import *
from pykickstart.options import *
class FC3_Reboot(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.action = kwargs.get("action", None)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.action == KS_REBOOT:
retval += "# Reboot after installation\nreboot\n"
elif self.action == KS_SHUTDOWN:
retval += "# Shutdown after installation\nshutdown\n"
return retval
def parse(self, args):
if self.currentCmd == "reboot":
self.action = KS_REBOOT
else:
self.action = KS_SHUTDOWN
return self
class FC6_Reboot(FC3_Reboot):
removedKeywords = FC3_Reboot.removedKeywords
removedAttrs = FC3_Reboot.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_Reboot.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.eject = kwargs.get("eject", False)
def __str__(self):
retval = FC3_Reboot.__str__(self).rstrip()
if self.eject:
retval += " --eject"
return retval + "\n"
def _getParser(self):
op = KSOptionParser()
op.add_option("--eject", dest="eject", action="store_true",
default=False)
return op
def parse(self, args):
FC3_Reboot.parse(self, args)
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
return self
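# Editor's sketch (not upstream code): FC6_Reboot appends --eject to the
# FC3 output after stripping its trailing newline, so
#
#     reboot --eject
#
# serializes back as:
#
#     # Reboot after installation
#     reboot --eject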
########NEW FILE########
__FILENAME__ = repo
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007, 2008, 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC6_RepoData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.baseurl = kwargs.get("baseurl", "")
self.mirrorlist = kwargs.get("mirrorlist", None)
self.name = kwargs.get("name", "")
def __eq__(self, y):
return self.name == y.name
def _getArgsAsStr(self):
retval = ""
if self.baseurl:
retval += "--baseurl=%s" % self.baseurl
elif self.mirrorlist:
retval += "--mirrorlist=%s" % self.mirrorlist
return retval
def __str__(self):
retval = BaseData.__str__(self)
retval += "repo --name=\"%s\" %s\n" % (self.name, self._getArgsAsStr())
return retval
class F8_RepoData(FC6_RepoData):
removedKeywords = FC6_RepoData.removedKeywords
removedAttrs = FC6_RepoData.removedAttrs
def __init__(self, *args, **kwargs):
FC6_RepoData.__init__(self, *args, **kwargs)
self.cost = kwargs.get("cost", None)
self.includepkgs = kwargs.get("includepkgs", [])
self.excludepkgs = kwargs.get("excludepkgs", [])
def _getArgsAsStr(self):
retval = FC6_RepoData._getArgsAsStr(self)
if self.cost:
retval += " --cost=%s" % self.cost
if self.includepkgs:
retval += " --includepkgs=\"%s\"" % ",".join(self.includepkgs)
if self.excludepkgs:
retval += " --excludepkgs=\"%s\"" % ",".join(self.excludepkgs)
return retval
class F11_RepoData(F8_RepoData):
removedKeywords = F8_RepoData.removedKeywords
removedAttrs = F8_RepoData.removedAttrs
def __init__(self, *args, **kwargs):
F8_RepoData.__init__(self, *args, **kwargs)
self.ignoregroups = kwargs.get("ignoregroups", None)
def _getArgsAsStr(self):
retval = F8_RepoData._getArgsAsStr(self)
if self.ignoregroups:
retval += " --ignoregroups=true"
return retval
class F13_RepoData(F11_RepoData):
removedKeywords = F11_RepoData.removedKeywords
removedAttrs = F11_RepoData.removedAttrs
def __init__(self, *args, **kwargs):
F11_RepoData.__init__(self, *args, **kwargs)
self.proxy = kwargs.get("proxy", "")
def _getArgsAsStr(self):
retval = F11_RepoData._getArgsAsStr(self)
if self.proxy:
retval += " --proxy=\"%s\"" % self.proxy
return retval
class F14_RepoData(F13_RepoData):
removedKeywords = F13_RepoData.removedKeywords
removedAttrs = F13_RepoData.removedAttrs
def __init__(self, *args, **kwargs):
F13_RepoData.__init__(self, *args, **kwargs)
self.noverifyssl = kwargs.get("noverifyssl", False)
def _getArgsAsStr(self):
retval = F13_RepoData._getArgsAsStr(self)
if self.noverifyssl:
retval += " --noverifyssl"
return retval
RHEL6_RepoData = F14_RepoData
F15_RepoData = F14_RepoData
class FC6_Repo(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
urlRequired = True
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.repoList = kwargs.get("repoList", [])
def __str__(self):
retval = ""
for repo in self.repoList:
retval += repo.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--name", dest="name", required=1)
op.add_option("--baseurl")
op.add_option("--mirrorlist")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 0:
mapping = {"command": "repo", "options": extra}
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping)
# This is lame, but I can't think of a better way to make sure only
# one of these two is specified.
if opts.baseurl and opts.mirrorlist:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one of --baseurl and --mirrorlist may be specified for repo command."))
if self.urlRequired and not opts.baseurl and not opts.mirrorlist:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("One of --baseurl or --mirrorlist must be specified for repo command."))
rd = self.handler.RepoData()
self._setToObj(self.op, opts, rd)
rd.lineno = self.lineno
# Check for duplicates in the data list.
if rd in self.dataList():
warnings.warn(_("A repo with the name %s has already been defined.") % rd.name)
return rd
def dataList(self):
return self.repoList
class F8_Repo(FC6_Repo):
removedKeywords = FC6_Repo.removedKeywords
removedAttrs = FC6_Repo.removedAttrs
def __str__(self):
retval = ""
for repo in self.repoList:
retval += repo.__str__()
return retval
def _getParser(self):
def list_cb (option, opt_str, value, parser):
for d in value.split(','):
parser.values.ensure_value(option.dest, []).append(d)
op = FC6_Repo._getParser(self)
op.add_option("--cost", action="store", type="int")
op.add_option("--excludepkgs", action="callback", callback=list_cb,
nargs=1, type="string")
op.add_option("--includepkgs", action="callback", callback=list_cb,
nargs=1, type="string")
return op
def methodToRepo(self):
if not self.handler.method.url:
raise KickstartError, formatErrorMsg(self.handler.method.lineno, msg=_("Method must be a url to be added to the repo list."))
reponame = "ks-method-url"
repourl = self.handler.method.url
rd = self.handler.RepoData(name=reponame, baseurl=repourl)
return rd
class F11_Repo(F8_Repo):
removedKeywords = F8_Repo.removedKeywords
removedAttrs = F8_Repo.removedAttrs
def _getParser(self):
op = F8_Repo._getParser(self)
op.add_option("--ignoregroups", action="store", type="ksboolean")
return op
class F13_Repo(F11_Repo):
removedKeywords = F11_Repo.removedKeywords
removedAttrs = F11_Repo.removedAttrs
def _getParser(self):
op = F11_Repo._getParser(self)
op.add_option("--proxy")
return op
class F14_Repo(F13_Repo):
removedKeywords = F13_Repo.removedKeywords
removedAttrs = F13_Repo.removedAttrs
def _getParser(self):
op = F13_Repo._getParser(self)
op.add_option("--noverifyssl", action="store_true", default=False)
return op
RHEL6_Repo = F14_Repo
class F15_Repo(F14_Repo):
removedKeywords = F14_Repo.removedKeywords
removedAttrs = F14_Repo.removedAttrs
urlRequired = False
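# Editor's sketch (not upstream code): FC6_Repo.parse() enforces that at most
# one of --baseurl/--mirrorlist is given, and requires one of them unless
# urlRequired is False (as in F15_Repo above).  A typical round-trip, with an
# illustrative URL:
#
#     rd = F8_RepoData(name="updates", baseurl="http://example.com/repo")
#     print rd    # -> repo --name="updates" --baseurl=http://example.com/repo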
########NEW FILE########
__FILENAME__ = rescue
#
# Alexander Todorov <atodorov@redhat.com>
#
# Copyright 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class F10_Rescue(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.rescue = False
self.nomount = kwargs.get("nomount", False)
self.romount = kwargs.get("romount", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.rescue:
retval += "rescue"
if self.nomount:
retval += " --nomount"
if self.romount:
retval += " --romount"
retval = "# Start rescue mode\n%s\n" % retval
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--nomount", dest="nomount", action="store_true", default=False)
op.add_option("--romount", dest="romount", action="store_true", default=False)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if opts.nomount and opts.romount:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one of --nomount and --romount may be specified for rescue command."))
self._setToSelf(self.op, opts)
self.rescue = True
return self
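# Editor's sketch (not upstream code): --nomount and --romount are mutually
# exclusive, and the bare command enables rescue mode, so
#
#     rescue --romount
#
# serializes back as:
#
#     # Start rescue mode
#     rescue --romount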
########NEW FILE########
__FILENAME__ = rootpw
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_RootPw(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.isCrypted = kwargs.get("isCrypted", False)
self.password = kwargs.get("password", "")
def _getArgsAsStr(self):
retval = ""
if self.isCrypted:
retval += " --iscrypted"
return retval
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.password != "":
retval += "# Root password\nrootpw%s %s\n" % (self._getArgsAsStr(), self.password)
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--iscrypted", dest="isCrypted", action="store_true",
default=False)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("A single argument is expected for the %s command") % "rootpw")
self.password = extra[0]
return self
class F8_RootPw(FC3_RootPw):
removedKeywords = FC3_RootPw.removedKeywords
removedAttrs = FC3_RootPw.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_RootPw.__init__(self, writePriority, *args, **kwargs)
self.lock = kwargs.get("lock", False)
def _getArgsAsStr(self):
retval = FC3_RootPw._getArgsAsStr(self)
if self.lock:
retval += " --lock"
if not self.isCrypted:
retval += " --plaintext"
return retval
def _getParser(self):
op = FC3_RootPw._getParser(self)
op.add_option("--lock", dest="lock", action="store_true", default=False)
op.add_option("--plaintext", dest="isCrypted", action="store_false")
return op
########NEW FILE########
__FILENAME__ = selinux
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.options import *
class FC3_SELinux(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.selinux = kwargs.get("selinux", None)
def __str__(self):
retval = KickstartCommand.__str__(self)
if not retval and self.selinux is None:
return ""
retval += "# SELinux configuration\n"
if self.selinux == SELINUX_DISABLED:
retval += "selinux --disabled\n"
elif self.selinux == SELINUX_ENFORCING:
retval += "selinux --enforcing\n"
elif self.selinux == SELINUX_PERMISSIVE:
retval += "selinux --permissive\n"
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--disabled", dest="selinux", action="store_const",
const=SELINUX_DISABLED)
op.add_option("--enforcing", dest="selinux", action="store_const",
const=SELINUX_ENFORCING)
op.add_option("--permissive", dest="selinux", action="store_const",
const=SELINUX_PERMISSIVE)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
return self
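# Editor's sketch (not upstream code): the three flags are store_const
# options writing into the same "selinux" destination, so the last flag on a
# line wins.  For example:
#
#     selinux --enforcing
#
# sets self.selinux = SELINUX_ENFORCING and serializes back as:
#
#     # SELinux configuration
#     selinux --enforcing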
########NEW FILE########
__FILENAME__ = services
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC6_Services(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.disabled = kwargs.get("disabled", [])
self.enabled = kwargs.get("enabled", [])
def __str__(self):
retval = KickstartCommand.__str__(self)
args = ""
if len(self.disabled) > 0:
args += " --disabled=\"%s\"" % ",".join(self.disabled)
if len(self.enabled) > 0:
args += " --enabled=\"%s\"" % ",".join(self.enabled)
if args != "":
retval += "# System services\nservices%s\n" % args
return retval
def _getParser(self):
def services_cb (option, opt_str, value, parser):
for d in value.split(','):
parser.values.ensure_value(option.dest, []).append(d.strip())
op = KSOptionParser()
op.add_option("--disabled", dest="disabled", action="callback",
callback=services_cb, nargs=1, type="string")
op.add_option("--enabled", dest="enabled", action="callback",
callback=services_cb, nargs=1, type="string")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
if len(self.disabled) == 0 and len(self.enabled) == 0:
raise KickstartParseError, formatErrorMsg(self.lineno, msg=_("One of --disabled or --enabled must be provided."))
return self
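# Editor's sketch (not upstream code): services_cb splits on commas and
# strips whitespace around each name, so
#
#     services --enabled="sshd, NetworkManager" --disabled=cups
#
# yields enabled == ["sshd", "NetworkManager"], disabled == ["cups"], and
# serializes back as:
#
#     # System services
#     services --disabled="cups" --enabled="sshd,NetworkManager"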
########NEW FILE########
__FILENAME__ = skipx
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_SkipX(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.skipx = kwargs.get("skipx", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.skipx:
retval += "# Do not configure the X Window System\nskipx\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "skipx")
self.skipx = True
return self
########NEW FILE########
__FILENAME__ = sshpw
#
# Peter Jones <pjones@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class F13_SshPwData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.username = kwargs.get("username", None)
self.isCrypted = kwargs.get("isCrypted", False)
self.password = kwargs.get("password", "")
self.lock = kwargs.get("lock", False)
def __eq__(self, y):
return self.username == y.username
def __str__(self):
retval = BaseData.__str__(self)
retval += "sshpw"
retval += self._getArgsAsStr() + '\n'
return retval
def _getArgsAsStr(self):
retval = ""
retval += " --username=%s" % self.username
if self.lock:
retval += " --lock"
if self.isCrypted:
retval += " --iscrypted"
else:
retval += " --plaintext"
retval += " %s" % self.password
return retval
class F13_SshPw(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.sshUserList = kwargs.get("sshUserList", [])
def __str__(self):
retval = ""
for user in self.sshUserList:
retval += user.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--username", dest="username", required=True)
op.add_option("--iscrypted", dest="isCrypted", action="store_true",
default=False)
op.add_option("--plaintext", dest="isCrypted", action="store_false")
op.add_option("--lock", dest="lock", action="store_true", default=False)
return op
def parse(self, args):
ud = self.handler.SshPwData()
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToObj(self.op, opts, ud)
ud.lineno = self.lineno
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("A single argument is expected for the %s command") % "sshpw")
ud.password = extra[0]
if ud in self.dataList():
warnings.warn(_("An ssh user with the name %s has already been defined.") % ud.name)
return ud
def dataList(self):
return self.sshUserList
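# Editor's sketch (not upstream code): each sshpw line creates one
# F13_SshPwData keyed by --username; the password is the single positional
# argument.  For example:
#
#     sshpw --username=root --plaintext s3cret
#
# serializes back as:
#
#     sshpw --username=root --plaintext s3cret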
########NEW FILE########
__FILENAME__ = timezone
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_Timezone(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.isUtc = kwargs.get("isUtc", False)
self.timezone = kwargs.get("timezone", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.timezone != "":
if self.isUtc:
utc = "--utc"
else:
utc = ""
retval += "# System timezone\ntimezone %s %s\n" %(utc, self.timezone)
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--utc", dest="isUtc", action="store_true", default=False)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("A single argument is expected for the %s command") % "timezone")
self.timezone = extra[0]
return self
class FC6_Timezone(FC3_Timezone):
removedKeywords = FC3_Timezone.removedKeywords
removedAttrs = FC3_Timezone.removedAttrs
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.timezone != "":
if self.isUtc:
utc = "--isUtc"
else:
utc = ""
retval += "# System timezone\ntimezone %s %s\n" %(utc, self.timezone)
return retval
def _getParser(self):
op = FC3_Timezone._getParser(self)
op.add_option("--utc", "--isUtc", dest="isUtc", action="store_true", default=False)
return op
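# Editor's sketch (not upstream code): FC6 accepts both spellings --utc and
# --isUtc on input but always writes --isUtc on output, so
#
#     timezone --utc Europe/Prague
#
# parsed by FC6_Timezone serializes back as:
#
#     # System timezone
#     timezone --isUtc Europe/Prague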
########NEW FILE########
__FILENAME__ = updates
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class F7_Updates(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.url = kwargs.get("url", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.url == "floppy":
retval += "updates\n"
elif self.url != "":
retval += "updates %s\n" % self.url
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s only takes one argument") % "updates")
elif len(extra) == 0:
self.url = "floppy"
else:
self.url = extra[0]
return self
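# Editor's sketch (not upstream code): a bare "updates" line stores the
# sentinel value "floppy", and serialization hides it again:
#
#     updates                           ->  url == "floppy"; str() emits "updates"
#     updates http://example.com/u.img  ->  url == the given URL
#
# The URL above is illustrative only.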
########NEW FILE########
__FILENAME__ = upgrade
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_Upgrade(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.upgrade = kwargs.get("upgrade", None)
self.op = self._getParser()
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.upgrade is None:
return retval
if self.upgrade:
retval += "# Upgrade existing installation\nupgrade\n"
else:
retval += "# Install OS instead of upgrade\ninstall\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "upgrade")
if self.currentCmd == "upgrade":
self.upgrade = True
else:
self.upgrade = False
return self
class F11_Upgrade(FC3_Upgrade):
removedKeywords = FC3_Upgrade.removedKeywords
removedAttrs = FC3_Upgrade.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_Upgrade.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.root_device = kwargs.get("root_device", None)
def __str__(self):
if self.upgrade and (self.root_device is not None):
retval = KickstartCommand.__str__(self)
retval += "# Upgrade existing installation\nupgrade --root-device=%s\n" % self.root_device
else:
retval = FC3_Upgrade.__str__(self)
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--root-device", dest="root_device")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "upgrade")
if (opts.root_device is not None) and (opts.root_device == ""):
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not accept empty parameter %s") % ("upgrade", "--root-device"))
else:
self.root_device = opts.root_device
if self.currentCmd == "upgrade":
self.upgrade = True
else:
self.upgrade = False
return self
########NEW FILE########
__FILENAME__ = user
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.constants import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC6_UserData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.groups = kwargs.get("groups", [])
self.homedir = kwargs.get("homedir", "")
self.isCrypted = kwargs.get("isCrypted", False)
self.name = kwargs.get("name", "")
self.password = kwargs.get("password", "")
self.shell = kwargs.get("shell", "")
self.uid = kwargs.get("uid", None)
def __eq__(self, y):
return self.name == y.name
def __str__(self):
retval = BaseData.__str__(self)
if self.uid != "":
retval += "user"
retval += self._getArgsAsStr() + "\n"
return retval
def _getArgsAsStr(self):
retval = ""
if len(self.groups) > 0:
retval += " --groups=%s" % ",".join(self.groups)
if self.homedir:
retval += " --homedir=%s" % self.homedir
if self.name:
retval += " --name=%s" % self.name
if self.password:
retval += " --password=%s" % self.password
if self.isCrypted:
retval += " --iscrypted"
if self.shell:
retval += " --shell=%s" % self.shell
if self.uid:
retval += " --uid=%s" % self.uid
return retval
class F8_UserData(FC6_UserData):
removedKeywords = FC6_UserData.removedKeywords
removedAttrs = FC6_UserData.removedAttrs
def __init__(self, *args, **kwargs):
FC6_UserData.__init__(self, *args, **kwargs)
self.lock = kwargs.get("lock", False)
def _getArgsAsStr(self):
retval = FC6_UserData._getArgsAsStr(self)
if self.lock:
retval += " --lock"
return retval
class F12_UserData(F8_UserData):
removedKeywords = F8_UserData.removedKeywords
removedAttrs = F8_UserData.removedAttrs
def __init__(self, *args, **kwargs):
F8_UserData.__init__(self, *args, **kwargs)
self.gecos = kwargs.get("gecos", "")
def _getArgsAsStr(self):
retval = F8_UserData._getArgsAsStr(self)
if self.gecos:
retval += " --gecos=\"%s\"" % (self.gecos,)
return retval
class FC6_User(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.userList = kwargs.get("userList", [])
def __str__(self):
retval = ""
for user in self.userList:
retval += user.__str__()
return retval
def _getParser(self):
def groups_cb (option, opt_str, value, parser):
for d in value.split(','):
parser.values.ensure_value(option.dest, []).append(d)
op = KSOptionParser()
op.add_option("--groups", dest="groups", action="callback",
callback=groups_cb, nargs=1, type="string")
op.add_option("--homedir")
op.add_option("--iscrypted", dest="isCrypted", action="store_true",
default=False)
op.add_option("--name", required=1)
op.add_option("--password")
op.add_option("--shell")
op.add_option("--uid", type="int")
return op
def parse(self, args):
ud = self.handler.UserData()
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToObj(self.op, opts, ud)
ud.lineno = self.lineno
# Check for duplicates in the data list.
if ud in self.dataList():
warnings.warn(_("A user with the name %s has already been defined.") % ud.name)
return ud
def dataList(self):
return self.userList
class F8_User(FC6_User):
removedKeywords = FC6_User.removedKeywords
removedAttrs = FC6_User.removedAttrs
def _getParser(self):
op = FC6_User._getParser(self)
op.add_option("--lock", action="store_true", default=False)
op.add_option("--plaintext", dest="isCrypted", action="store_false")
return op
class F12_User(F8_User):
removedKeywords = F8_User.removedKeywords
removedAttrs = F8_User.removedAttrs
def _getParser(self):
op = F8_User._getParser(self)
op.add_option("--gecos", type="string")
return op
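# Editor's sketch (not upstream code): groups_cb accumulates comma-separated
# values, so
#
#     user --name=bob --groups=wheel,audio --uid=1000
#
# yields groups == ["wheel", "audio"] and serializes back as:
#
#     user --groups=wheel,audio --name=bob --uid=1000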
########NEW FILE########
__FILENAME__ = vnc
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
class FC3_Vnc(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.enabled = kwargs.get("enabled", False)
self.password = kwargs.get("password", "")
self.connect = kwargs.get("connect", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if not self.enabled:
return retval
retval += "vnc"
if self.connect != "":
retval += " --connect=%s" % self.connect
if self.password != "":
retval += " --password=%s" % self.password
return retval + "\n"
def _getParser(self):
op = KSOptionParser()
op.add_option("--connect")
op.add_option("--password", dest="password")
return op
def parse(self, args):
self.enabled = True
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
return self
class FC6_Vnc(FC3_Vnc):
removedKeywords = FC3_Vnc.removedKeywords + ["connect"]
removedAttrs = FC3_Vnc.removedAttrs + ["connect"]
def __init__(self, writePriority=0, *args, **kwargs):
FC3_Vnc.__init__(self, writePriority, *args, **kwargs)
self.deleteRemovedAttrs()
self.host = kwargs.get("host", "")
self.port = kwargs.get("port", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if not self.enabled:
return retval
retval += "vnc"
if self.host != "":
retval += " --host=%s" % self.host
if self.port != "":
retval += " --port=%s" % self.port
if self.password != "":
retval += " --password=%s" % self.password
return retval + "\n"
def _getParser(self):
def connect_cb (option, opt_str, value, parser):
cargs = value.split(":")
parser.values.ensure_value("host", cargs[0])
if len(cargs) > 1:
parser.values.ensure_value("port", cargs[1])
op = FC3_Vnc._getParser(self)
op.add_option("--connect", action="callback", callback=connect_cb,
nargs=1, type="string")
op.add_option("--host", dest="host")
op.add_option("--port", dest="port")
return op
class F9_Vnc(FC6_Vnc):
removedKeywords = FC6_Vnc.removedKeywords
removedAttrs = FC6_Vnc.removedAttrs
def _getParser(self):
op = FC6_Vnc._getParser(self)
op.remove_option("--connect")
return op
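# Editor's sketch (not upstream code): FC6's connect_cb splits the legacy
# --connect value on ":" into host and port, so
#
#     vnc --connect=display.example.com:5901
#
# parses to host == "display.example.com", port == "5901" and serializes as:
#
#     vnc --host=display.example.com --port=5901
#
# F9 removes --connect entirely.  The host name above is illustrative only.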
########NEW FILE########
__FILENAME__ = volgroup
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_VolGroupData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.format = kwargs.get("format", True)
self.pesize = kwargs.get("pesize", 32768)
self.preexist = kwargs.get("preexist", False)
self.vgname = kwargs.get("vgname", "")
self.physvols = kwargs.get("physvols", [])
def __eq__(self, y):
return self.vgname == y.vgname
def __str__(self):
retval = BaseData.__str__(self)
retval += "volgroup %s" % self.vgname
if not self.format:
retval += " --noformat"
if self.pesize != 0:
retval += " --pesize=%d" % self.pesize
if self.preexist:
retval += " --useexisting"
return retval + " " + " ".join(self.physvols) + "\n"
class FC3_VolGroup(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=132, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.vgList = kwargs.get("vgList", [])
def __str__(self):
retval = ""
for vg in self.vgList:
retval += vg.__str__()
return retval
def _getParser(self):
# Have to be a little more complicated to set two values.
def vg_cb (option, opt_str, value, parser):
parser.values.format = False
parser.values.preexist = True
op = KSOptionParser()
op.add_option("--noformat", action="callback", callback=vg_cb,
dest="format", default=True, nargs=0)
op.add_option("--pesize", dest="pesize", type="int", nargs=1,
default=32768)
op.add_option("--useexisting", dest="preexist", action="store_true",
default=False)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
vg = self.handler.VolGroupData()
self._setToObj(self.op, opts, vg)
vg.lineno = self.lineno
vg.vgname = extra[0]
vg.physvols = extra[1:]
# Check for duplicates in the data list.
if vg in self.dataList():
warnings.warn(_("A volgroup with the name %s has already been defined.") % vg.vgname)
return vg
def dataList(self):
return self.vgList
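# Editor's sketch (not upstream code): the first positional argument is the
# volume group name and the rest are physical volumes, so
#
#     volgroup vg00 pv.01 pv.02
#
# serializes back (with the default PE size) as:
#
#     volgroup vg00 --pesize=32768 pv.01 pv.02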
########NEW FILE########
__FILENAME__ = xconfig
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_XConfig(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.card = kwargs.get("card", "")
self.defaultdesktop = kwargs.get("defaultdesktop", "")
self.depth = kwargs.get("depth", 0)
self.hsync = kwargs.get("hsync", "")
self.monitor = kwargs.get("monitor", "")
self.noProbe = kwargs.get("noProbe", False)
self.resolution = kwargs.get("resolution", "")
self.server = kwargs.get("server", "")
self.startX = kwargs.get("startX", False)
self.videoRam = kwargs.get("videoRam", "")
self.vsync = kwargs.get("vsync", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.card != "":
retval += " --card=%s" % self.card
if self.defaultdesktop != "":
retval += " --defaultdesktop=%s" % self.defaultdesktop
if self.depth != 0:
retval += " --depth=%d" % self.depth
if self.hsync != "":
retval += " --hsync=%s" % self.hsync
if self.monitor != "":
retval += " --monitor=%s" % self.monitor
if self.noProbe:
retval += " --noprobe"
if self.resolution != "":
retval += " --resolution=%s" % self.resolution
if self.server != "":
retval += " --server=%s" % self.server
if self.startX:
retval += " --startxonboot"
if self.videoRam != "":
retval += " --videoram=%s" % self.videoRam
if self.vsync != "":
retval += " --vsync=%s" % self.vsync
if retval != "":
retval = "# X Window System configuration information\nxconfig %s\n" % retval
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--card")
op.add_option("--defaultdesktop")
op.add_option("--depth", action="store", type="int", nargs=1)
op.add_option("--hsync")
op.add_option("--monitor")
op.add_option("--noprobe", dest="noProbe", action="store_true",
default=False)
op.add_option("--resolution")
op.add_option("--server")
op.add_option("--startxonboot", dest="startX", action="store_true",
default=False)
op.add_option("--videoram", dest="videoRam")
op.add_option("--vsync")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if extra:
mapping = {"command": "xconfig", "options": extra}
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping)
self._setToSelf(self.op, opts)
return self
class FC6_XConfig(FC3_XConfig):
removedKeywords = FC3_XConfig.removedKeywords + ["card", "hsync", "monitor", "noProbe", "vsync"]
removedAttrs = FC3_XConfig.removedAttrs + ["card", "hsync", "monitor", "noProbe", "vsync"]
def __init__(self, writePriority=0, *args, **kwargs):
FC3_XConfig.__init__(self, writePriority, *args, **kwargs)
self.deleteRemovedAttrs()
self.driver = kwargs.get("driver", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
if hasattr(self, "driver") and self.driver != "":
retval += " --driver=%s" % self.driver
if self.defaultdesktop != "":
retval += " --defaultdesktop=%s" % self.defaultdesktop
if self.depth != 0:
retval += " --depth=%d" % self.depth
if hasattr(self, "resolution") and self.resolution != "":
retval += " --resolution=%s" % self.resolution
if self.startX:
retval += " --startxonboot"
if hasattr(self, "videoRam") and self.videoRam != "":
retval += " --videoram=%s" % self.videoRam
if retval != "":
retval = "# X Window System configuration information\nxconfig %s\n" % retval
return retval
def _getParser(self):
op = FC3_XConfig._getParser(self)
op.add_option("--card", deprecated=1)
op.add_option("--driver", dest="driver")
op.add_option("--hsync", deprecated=1)
op.add_option("--monitor", deprecated=1)
op.add_option("--noprobe", deprecated=1)
op.add_option("--vsync", deprecated=1)
return op
class F9_XConfig(FC6_XConfig):
removedKeywords = FC6_XConfig.removedKeywords
removedAttrs = FC6_XConfig.removedAttrs
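    # Note: FC6 merely deprecated these options (they still parsed, with a
    # warning); from F9 on they are removed outright, so a kickstart that
    # still uses them fails with an unknown-option error.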
def _getParser(self):
op = FC6_XConfig._getParser(self)
op.remove_option("--card")
op.remove_option("--hsync")
op.remove_option("--monitor")
op.remove_option("--noprobe")
op.remove_option("--vsync")
return op
class F10_XConfig(F9_XConfig):
removedKeywords = F9_XConfig.removedKeywords + ["driver", "resolution", "videoRam"]
removedAttrs = F9_XConfig.removedAttrs + ["driver", "resolution", "videoRam"]
def __init__(self, writePriority=0, *args, **kwargs):
F9_XConfig.__init__(self, writePriority, *args, **kwargs)
self.deleteRemovedAttrs()
def _getParser(self):
op = F9_XConfig._getParser(self)
op.add_option("--driver", deprecated=1)
op.add_option("--depth", deprecated=1)
op.add_option("--resolution", deprecated=1)
op.add_option("--videoram", deprecated=1)
return op
class F14_XConfig(F10_XConfig):
removedKeywords = F10_XConfig.removedKeywords
removedAttrs = F10_XConfig.removedAttrs
def _getParser(self):
op = F10_XConfig._getParser(self)
op.remove_option("--driver")
op.remove_option("--depth")
op.remove_option("--resolution")
op.remove_option("--videoram")
return op
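# Illustrative sketch, not part of the original pykickstart sources: building
# an xconfig command object by keyword and rendering it back to kickstart
# syntax.  The option values are made-up examples.
def _example_xconfig():
    xc = FC3_XConfig(defaultdesktop="GNOME", depth=24, startX=True)
    # Expected shape (assuming the base __str__ contributes no text here):
    # "# X Window System configuration information\nxconfig ... --startxonboot\n"
    return str(xc)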
########NEW FILE########
__FILENAME__ = zerombr
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import warnings
from pykickstart.base import *
from pykickstart.options import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_ZeroMbr(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=110, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.zerombr = kwargs.get("zerombr", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.zerombr:
retval += "# Clear the Master Boot Record\nzerombr\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
warnings.warn(_("Ignoring deprecated option on line %s: The zerombr command no longer takes any options. In future releases, this will result in a fatal error from kickstart. Please modify your kickstart file to remove any options.") % self.lineno, DeprecationWarning)
self.zerombr = True
return self
class F9_ZeroMbr(FC3_ZeroMbr):
removedKeywords = FC3_ZeroMbr.removedKeywords
removedAttrs = FC3_ZeroMbr.removedAttrs
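    # Same command as FC3_ZeroMbr, but stray arguments are now a hard parse
    # error instead of a deprecation warning.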
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) > 0:
            raise KickstartParseError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "zerombr"))
self.zerombr = True
return self
########NEW FILE########
__FILENAME__ = zfcp
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.options import *
import gettext
import warnings
_ = lambda x: gettext.ldgettext("pykickstart", x)
class FC3_ZFCPData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.devnum = kwargs.get("devnum", "")
self.wwpn = kwargs.get("wwpn", "")
self.fcplun = kwargs.get("fcplun", "")
self.scsiid = kwargs.get("scsiid", "")
self.scsilun = kwargs.get("scsilun", "")
def __eq__(self, y):
return self.devnum == y.devnum and self.wwpn == y.wwpn and \
self.fcplun == y.fcplun and self.scsiid == y.scsiid and \
self.scsilun == y.scsilun
def __str__(self):
retval = BaseData.__str__(self)
retval += "zfcp"
if self.devnum != "":
retval += " --devnum=%s" % self.devnum
if self.wwpn != "":
retval += " --wwpn=%s" % self.wwpn
if self.fcplun != "":
retval += " --fcplun=%s" % self.fcplun
if hasattr(self, "scsiid") and self.scsiid != "":
retval += " --scsiid=%s" % self.scsiid
if hasattr(self, "scsilun") and self.scsilun != "":
retval += " --scsilun=%s" % self.scsilun
return retval + "\n"
class F12_ZFCPData(FC3_ZFCPData):
removedKeywords = FC3_ZFCPData.removedKeywords + ["scsiid", "scsilun"]
removedAttrs = FC3_ZFCPData.removedAttrs + ["scsiid", "scsilun"]
def __init__(self, *args, **kwargs):
FC3_ZFCPData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
F14_ZFCPData = F12_ZFCPData
class FC3_ZFCP(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=71, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.zfcp = kwargs.get("zfcp", [])
def __str__(self):
retval = ""
for zfcp in self.zfcp:
            retval += str(zfcp)
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--devnum", dest="devnum", required=1)
op.add_option("--fcplun", dest="fcplun", required=1)
op.add_option("--scsiid", dest="scsiid", required=1)
op.add_option("--scsilun", dest="scsilun", required=1)
op.add_option("--wwpn", dest="wwpn", required=1)
return op
def parse(self, args):
zd = self.handler.ZFCPData()
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToObj(self.op, opts, zd)
zd.lineno = self.lineno
# Check for duplicates in the data list.
if zd in self.dataList():
warnings.warn(_("A zfcp with this information has already been defined."))
return zd
def dataList(self):
return self.zfcp
class F12_ZFCP(FC3_ZFCP):
removedKeywords = FC3_ZFCP.removedKeywords
removedAttrs = FC3_ZFCP.removedAttrs + ["scsiid", "scsilun"]
def __init__(self, *args, **kwargs):
FC3_ZFCP.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
def _getParser(self):
op = FC3_ZFCP._getParser(self)
op.add_option("--scsiid", deprecated=1)
op.add_option("--scsilun", deprecated=1)
return op
class F14_ZFCP(F12_ZFCP):
removedKeywords = F12_ZFCP.removedKeywords
removedAttrs = F12_ZFCP.removedAttrs
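    # F12 kept --scsiid/--scsilun as deprecated no-ops; F14 removes them from
    # the parser entirely, so passing either one becomes an error.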
def _getParser(self):
op = F12_ZFCP._getParser(self)
op.remove_option("--scsiid")
op.remove_option("--scsilun")
return op
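# Illustrative sketch, not from the original sources: a zfcp data object
# built directly and rendered via __str__.  The device numbers below are
# invented placeholder values.
def _example_zfcp_line():
    zd = FC3_ZFCPData(devnum="0.0.fc00", wwpn="0x5005076300c213e9",
                      fcplun="0x401040a000000000")
    return str(zd)   # -> "zfcp --devnum=0.0.fc00 --wwpn=... --fcplun=...\n"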
########NEW FILE########
__FILENAME__ = constants
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005-2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
CLEARPART_TYPE_LINUX = 0
CLEARPART_TYPE_ALL = 1
CLEARPART_TYPE_NONE = 2
DISPLAY_MODE_CMDLINE = 0
DISPLAY_MODE_GRAPHICAL = 1
DISPLAY_MODE_TEXT = 2
FIRSTBOOT_DEFAULT = 0
FIRSTBOOT_SKIP = 1
FIRSTBOOT_RECONFIG = 2
KS_MISSING_PROMPT = 0
KS_MISSING_IGNORE = 1
SELINUX_DISABLED = 0
SELINUX_ENFORCING = 1
SELINUX_PERMISSIVE = 2
KS_SCRIPT_PRE = 0
KS_SCRIPT_POST = 1
KS_SCRIPT_TRACEBACK = 2
KS_WAIT = 0
KS_REBOOT = 1
KS_SHUTDOWN = 2
KS_INSTKEY_SKIP = -99
BOOTPROTO_DHCP = "dhcp"
BOOTPROTO_BOOTP = "bootp"
BOOTPROTO_STATIC = "static"
BOOTPROTO_QUERY = "query"
BOOTPROTO_IBFT = "ibft"
GROUP_REQUIRED = 0
GROUP_DEFAULT = 1
GROUP_ALL = 2
########NEW FILE########
__FILENAME__ = errors
#
# errors.py: Kickstart error handling.
#
# Chris Lumens <clumens@redhat.com>
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Error handling classes and functions.
This module exports a single function:
formatErrorMsg - Properly formats an error message.
It also exports several exception classes:
KickstartError - A generic exception class.
KickstartParseError - An exception for errors relating to parsing.
KickstartValueError - An exception for errors relating to option
processing.
KickstartVersionError - An exception for errors relating to unsupported
syntax versions.
"""
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
def formatErrorMsg(lineno, msg=""):
"""Properly format the error message msg for inclusion in an exception."""
if msg != "":
mapping = {"lineno": lineno, "msg": msg}
return _("The following problem occurred on line %(lineno)s of the kickstart file:\n\n%(msg)s\n") % mapping
else:
return _("There was a problem reading from line %s of the kickstart file") % lineno
class KickstartError(Exception):
"""A generic exception class for unspecific error conditions."""
def __init__(self, val = ""):
"""Create a new KickstartError exception instance with the descriptive
message val. val should be the return value of formatErrorMsg.
"""
Exception.__init__(self)
self.value = val
    def __str__(self):
return self.value
class KickstartParseError(KickstartError):
"""An exception class for errors when processing the input file, such as
unknown options, commands, or sections.
"""
def __init__(self, msg):
"""Create a new KickstartParseError exception instance with the
descriptive message val. val should be the return value of
formatErrorMsg.
"""
KickstartError.__init__(self, msg)
def __str__(self):
return self.value
class KickstartValueError(KickstartError):
"""An exception class for errors when processing arguments to commands,
such as too many arguments, too few arguments, or missing required
arguments.
"""
def __init__(self, msg):
"""Create a new KickstartValueError exception instance with the
descriptive message val. val should be the return value of
formatErrorMsg.
"""
KickstartError.__init__(self, msg)
    def __str__(self):
return self.value
class KickstartVersionError(KickstartError):
"""An exception class for errors related to using an incorrect version of
kickstart syntax.
"""
def __init__(self, msg):
"""Create a new KickstartVersionError exception instance with the
descriptive message val. val should be the return value of
formatErrorMsg.
"""
KickstartError.__init__(self, msg)
    def __str__(self):
return self.value
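# Illustrative sketch, not part of the original module: the intended pairing
# of formatErrorMsg with the exception classes above.  The line number and
# the message text are made-up values.
def _example_report_parse_error(lineno=42):
    raise KickstartParseError(formatErrorMsg(lineno, msg=_("unknown command")))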
########NEW FILE########
__FILENAME__ = control
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.version import *
from pykickstart.commands import *
# This map is keyed on kickstart syntax version as provided by
# pykickstart.version. Within each sub-dict is a mapping from command name
# to the class that handles it. This is an onto mapping - that is, multiple
# command names can map to the same class. However, the Handler will ensure
# that only one instance of each class ever exists.
commandMap = {
FC3: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC3_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC3_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc3
FC4: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC4_Network,
"nfs": method.FC3_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC4_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc4
FC5: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.FC4_Network,
"nfs": method.FC3_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC5_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc5
FC6: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"mouse": mouse.FC3_Mouse,
"multipath": multipath.FC6_MultiPath,
"network": network.FC6_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.FC5_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"user": user.FC6_User,
"url": method.FC6_Method,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc6
F7: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC4_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.FC6_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F7_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.FC6_User,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f7
F8: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.FC4_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F8_Network,
"nfs": method.FC6_Method,
"part": partition.FC4_Partition,
"partition": partition.FC4_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F7_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f8
F9: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F9_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F9_Partition,
"partition": partition.F9_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F9_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f9
F10: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F9_Partition,
"partition": partition.F9_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F8_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f10
F11: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F8_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F9_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F11_Partition,
"partition": partition.F11_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F9_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F11_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.FC6_Method,
"user": user.F8_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f11
F12: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F12_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F12_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.FC6_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F12_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F11_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.FC6_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
},
# based on f12
F13: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F12_Bootloader,
"cdrom": method.F13_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F13_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F13_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F13_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F13_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F13_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
},
# based on f13
F14: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F14_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.F14_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F14_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F14_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F14_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on f14
F15: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F15_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F15_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F9_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F15_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F15_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on f15
F16: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.F15_Bootloader,
"cdrom": method.F14_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F14_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F14_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.F14_Method,
"ignoredisk": ignoredisk.F14_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F15_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.F16_Network,
"nfs": method.F14_Method,
"part": partition.F14_Partition,
"partition": partition.F14_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F15_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.F15_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.F14_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F14_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F14_ZFCP,
},
# based on fc1
RHEL3: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC3_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.FC3_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.RHEL3_Mouse,
"network": network.FC3_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
},
# based on fc3
RHEL4: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.FC3_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.FC3_Bootloader,
"cdrom": method.FC3_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"driverdisk": driverdisk.FC4_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC3_Reboot,
"harddrive": method.FC3_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC3_LangSupport,
"lilo": bootloader.FC3_Bootloader,
"lilocheck": lilocheck.FC3_LiloCheck,
"logvol": logvol.FC3_LogVol,
"monitor": monitor.FC3_Monitor,
"mouse": mouse.FC3_Mouse,
"network": network.RHEL4_Network,
"nfs": method.FC3_Method,
"part": partition.FC3_Partition,
"partition": partition.FC3_Partition,
"poweroff": reboot.FC3_Reboot,
"raid": raid.FC3_Raid,
"reboot": reboot.FC3_Reboot,
"rootpw": rootpw.FC3_RootPw,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC3_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC3_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"url": method.FC3_Method,
"vnc": vnc.FC3_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC3_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on fc6
RHEL5: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F9_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.RHEL5_Bootloader,
"cdrom": method.FC6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.FC3_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"firewall": firewall.FC3_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"halt": reboot.FC6_Reboot,
"harddrive": method.FC6_Method,
"ignoredisk": ignoredisk.F8_IgnoreDisk,
"install": upgrade.FC3_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.FC6_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"key": key.RHEL5_Key,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"langsupport": langsupport.FC5_LangSupport,
"logging": logging.FC6_Logging,
"logvol": logvol.RHEL5_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.FC6_Monitor,
"mouse": mouse.FC3_Mouse,
"multipath": multipath.FC6_MultiPath,
"network": network.RHEL5_Network,
"nfs": method.FC6_Method,
"part": partition.RHEL5_Partition,
"partition": partition.RHEL5_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.RHEL5_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.FC6_Repo,
"rootpw": rootpw.FC3_RootPw,
"services": services.FC6_Services,
"selinux": selinux.FC3_SELinux,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"upgrade": upgrade.FC3_Upgrade,
"user": user.FC6_User,
"url": method.FC6_Method,
"vnc": vnc.FC6_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.FC6_XConfig,
"zerombr": zerombr.FC3_ZeroMbr,
"zfcp": zfcp.FC3_ZFCP,
},
# based on f13ish
RHEL6: {
"auth": authconfig.FC3_Authconfig,
"authconfig": authconfig.FC3_Authconfig,
"autopart": autopart.F12_AutoPart,
"autostep": autostep.FC3_AutoStep,
"bootloader": bootloader.RHEL6_Bootloader,
"cdrom": method.RHEL6_Method,
"clearpart": clearpart.FC3_ClearPart,
"cmdline": displaymode.FC3_DisplayMode,
"device": device.F8_Device,
"deviceprobe": deviceprobe.FC3_DeviceProbe,
"dmraid": dmraid.FC6_DmRaid,
"driverdisk": driverdisk.F12_DriverDisk,
"fcoe": fcoe.F13_Fcoe,
"firewall": firewall.F10_Firewall,
"firstboot": firstboot.FC3_Firstboot,
"graphical": displaymode.FC3_DisplayMode,
"group": group.F12_Group,
"halt": reboot.FC6_Reboot,
"harddrive": method.RHEL6_Method,
"ignoredisk": ignoredisk.RHEL6_IgnoreDisk,
"install": upgrade.F11_Upgrade,
"interactive": interactive.FC3_Interactive,
"iscsi": iscsi.F10_Iscsi,
"iscsiname": iscsiname.FC6_IscsiName,
"keyboard": keyboard.FC3_Keyboard,
"lang": lang.FC3_Lang,
"logging": logging.FC6_Logging,
"logvol": logvol.F12_LogVol,
"mediacheck": mediacheck.FC4_MediaCheck,
"monitor": monitor.F10_Monitor,
"multipath": multipath.FC6_MultiPath,
"network": network.RHEL6_Network,
"nfs": method.RHEL6_Method,
"part": partition.F12_Partition,
"partition": partition.F12_Partition,
"poweroff": reboot.FC6_Reboot,
"raid": raid.F13_Raid,
"reboot": reboot.FC6_Reboot,
"repo": repo.RHEL6_Repo,
"rescue": rescue.F10_Rescue,
"rootpw": rootpw.F8_RootPw,
"selinux": selinux.FC3_SELinux,
"services": services.FC6_Services,
"shutdown": reboot.FC6_Reboot,
"skipx": skipx.FC3_SkipX,
"sshpw": sshpw.F13_SshPw,
"text": displaymode.FC3_DisplayMode,
"timezone": timezone.FC6_Timezone,
"updates": updates.F7_Updates,
"upgrade": upgrade.F11_Upgrade,
"url": method.RHEL6_Method,
"user": user.F12_User,
"vnc": vnc.F9_Vnc,
"volgroup": volgroup.FC3_VolGroup,
"xconfig": xconfig.F10_XConfig,
"zerombr": zerombr.F9_ZeroMbr,
"zfcp": zfcp.F12_ZFCP,
}
}
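# Illustrative sketch, not part of the original module: resolving the class
# that implements one command at one syntax version.  The version and command
# chosen here are arbitrary examples.
def _example_lookup_command(version=F14, command="bootloader"):
    # Aliases such as "part"/"partition" resolve to the same class, which the
    # handler then instantiates exactly once.
    return commandMap[version][command]   # -> bootloader.F14_Bootloader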
# This map is keyed on kickstart syntax version as provided by
# pykickstart.version. Within each sub-dict is a mapping from a data object
# name to the class that provides it. This is a bijective mapping - that is,
# each name maps to exactly one data class and all data classes have a name.
# More than one instance of each class is allowed to exist, however.
dataMap = {
FC3: {
"DriverDiskData": driverdisk.FC3_DriverDiskData,
"LogVolData": logvol.FC3_LogVolData,
"NetworkData": network.FC3_NetworkData,
"PartData": partition.FC3_PartData,
"RaidData": raid.FC3_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
FC4: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"LogVolData": logvol.FC4_LogVolData,
"NetworkData": network.FC4_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.FC4_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
FC5: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"LogVolData": logvol.FC4_LogVolData,
"NetworkData": network.FC4_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.FC5_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
FC6: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.FC4_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.FC6_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.FC5_RaidData,
"RepoData": repo.FC6_RepoData,
"UserData": user.FC6_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F7: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.FC4_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.FC6_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.F7_RaidData,
"RepoData": repo.FC6_RepoData,
"UserData": user.FC6_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F8: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.FC4_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.FC4_PartData,
"RaidData": raid.F7_RaidData,
"RepoData": repo.F8_RepoData,
"UserData": user.F8_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F9: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.F9_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F9_PartData,
"RaidData": raid.F9_RaidData,
"RepoData": repo.F8_RepoData,
"UserData": user.F8_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F10: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F9_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F9_PartData,
"RaidData": raid.F9_RaidData,
"RepoData": repo.F8_RepoData,
"UserData": user.F8_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F11: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F9_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F11_PartData,
"RaidData": raid.F9_RaidData,
"RepoData": repo.F11_RepoData,
"UserData": user.F8_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
F12: {
"DriverDiskData": driverdisk.F12_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F12_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F12_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F12_PartData,
"RaidData": raid.F12_RaidData,
"RepoData": repo.F11_RepoData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F12_ZFCPData,
},
F13: {
"DriverDiskData": driverdisk.F12_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F12_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F12_PartData,
"RaidData": raid.F13_RaidData,
"RepoData": repo.F13_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F12_ZFCPData,
},
F14: {
"DriverDiskData": driverdisk.F14_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F14_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F14_PartData,
"RaidData": raid.F14_RaidData,
"RepoData": repo.F14_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F14_ZFCPData,
},
F15: {
"DriverDiskData": driverdisk.F14_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F15_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F8_NetworkData,
"PartData": partition.F14_PartData,
"RaidData": raid.F15_RaidData,
"RepoData": repo.F15_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F14_ZFCPData,
},
F16: {
"DriverDiskData": driverdisk.F14_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F15_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.F16_NetworkData,
"PartData": partition.F14_PartData,
"RaidData": raid.F15_RaidData,
"RepoData": repo.F15_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F14_ZFCPData,
},
RHEL3: {
"DriverDiskData": driverdisk.FC3_DriverDiskData,
"LogVolData": logvol.FC3_LogVolData,
"NetworkData": network.RHEL4_NetworkData,
"PartData": partition.FC3_PartData,
"RaidData": raid.FC3_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
RHEL4: {
"DriverDiskData": driverdisk.FC4_DriverDiskData,
"LogVolData": logvol.FC3_LogVolData,
"NetworkData": network.RHEL4_NetworkData,
"PartData": partition.FC3_PartData,
"RaidData": raid.FC3_RaidData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
RHEL5: {
"DriverDiskData": driverdisk.F12_DriverDiskData,
"DmRaidData": dmraid.FC6_DmRaidData,
"IscsiData": iscsi.FC6_IscsiData,
"LogVolData": logvol.RHEL5_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.FC6_NetworkData,
"PartData": partition.RHEL5_PartData,
"RaidData": raid.RHEL5_RaidData,
"RepoData": repo.FC6_RepoData,
"UserData": user.FC6_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.FC3_ZFCPData,
},
RHEL6: {
"DriverDiskData": driverdisk.F12_DriverDiskData,
"DeviceData": device.F8_DeviceData,
"DmRaidData": dmraid.FC6_DmRaidData,
"FcoeData": fcoe.F13_FcoeData,
"GroupData": group.F12_GroupData,
"IscsiData": iscsi.F10_IscsiData,
"LogVolData": logvol.F12_LogVolData,
"MultiPathData": multipath.FC6_MultiPathData,
"NetworkData": network.RHEL6_NetworkData,
"PartData": partition.F12_PartData,
"RaidData": raid.F13_RaidData,
"RepoData": repo.RHEL6_RepoData,
"SshPwData": sshpw.F13_SshPwData,
"UserData": user.F12_UserData,
"VolGroupData": volgroup.FC3_VolGroupData,
"ZFCPData": zfcp.F12_ZFCPData,
}
}
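# Illustrative sketch, not part of the original module: unlike command
# classes, data classes are instantiated once per occurrence.  The keyword
# names below are assumed to mirror the data class attributes (as with
# FC3_ZFCPData above); the values are invented.
def _example_build_network_data(version=F16):
    cls = dataMap[version]["NetworkData"]        # -> network.F16_NetworkData
    return cls(device="eth0", bootProto="dhcp")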
########NEW FILE########
__FILENAME__ = f10
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F10Handler(BaseHandler):
version = F10
########NEW FILE########
__FILENAME__ = f11
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F11Handler(BaseHandler):
version = F11
########NEW FILE########
__FILENAME__ = f12
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F12Handler(BaseHandler):
version = F12
########NEW FILE########
__FILENAME__ = f13
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F13Handler(BaseHandler):
version = F13
########NEW FILE########
__FILENAME__ = f14
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F14Handler(BaseHandler):
version = F14
########NEW FILE########
__FILENAME__ = f15
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F15Handler(BaseHandler):
version = F15
########NEW FILE########
__FILENAME__ = f16
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F16Handler(BaseHandler):
version = F16
########NEW FILE########
__FILENAME__ = f7
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F7Handler(BaseHandler):
version = F7
########NEW FILE########
__FILENAME__ = f8
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F8Handler(BaseHandler):
version = F8
########NEW FILE########
__FILENAME__ = f9
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class F9Handler(BaseHandler):
version = F9
########NEW FILE########
__FILENAME__ = fc3
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class FC3Handler(BaseHandler):
version = FC3
########NEW FILE########
__FILENAME__ = fc4
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class FC4Handler(BaseHandler):
version = FC4
########NEW FILE########
__FILENAME__ = fc5
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class FC5Handler(BaseHandler):
version = FC5
########NEW FILE########
__FILENAME__ = fc6
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class FC6Handler(BaseHandler):
version = FC6
########NEW FILE########
__FILENAME__ = rhel3
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class RHEL3Handler(BaseHandler):
version = RHEL3
########NEW FILE########
__FILENAME__ = rhel4
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class RHEL4Handler(BaseHandler):
version = RHEL4
########NEW FILE########
__FILENAME__ = rhel5
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class RHEL5Handler(BaseHandler):
version = RHEL5
########NEW FILE########
__FILENAME__ = rhel6
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class RHEL6Handler(BaseHandler):
version = RHEL6
########NEW FILE########
__FILENAME__ = ko
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Base classes for internal pykickstart use.
The module exports the following important classes:
KickstartObject - The base class for all classes in pykickstart
"""
class KickstartObject(object):
"""The base class for all other classes in pykickstart."""
def __init__(self, *args, **kwargs):
"""Create a new KickstartObject instance. All other classes in
pykickstart should be derived from this one. Instance attributes:
"""
pass
def __str__(self):
return ""
########NEW FILE########
__FILENAME__ = options
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Specialized option handling.
This module exports two classes:
KSOptionParser - A specialized subclass of OptionParser to be used
in BaseHandler subclasses.
KSOption - A specialized subclass of Option.
"""
import warnings
from copy import copy
from optparse import *
from constants import *
from errors import *
from version import *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class KSOptionParser(OptionParser):
"""A specialized subclass of optparse.OptionParser to handle extra option
attribute checking, work error reporting into the KickstartParseError
framework, and to turn off the default help.
"""
def exit(self, status=0, msg=None):
pass
def error(self, msg):
if self.lineno != None:
raise KickstartParseError, formatErrorMsg(self.lineno, msg=msg)
else:
raise KickstartParseError, msg
def keys(self):
retval = []
for opt in self.option_list:
if opt not in retval:
retval.append(opt.dest)
return retval
def _init_parsing_state (self):
OptionParser._init_parsing_state(self)
self.option_seen = {}
def check_values (self, values, args):
def seen(self, option):
return self.option_seen.has_key(option)
def usedTooNew(self, option):
return option.introduced and option.introduced > self.version
def usedDeprecated(self, option):
return option.deprecated
def usedRemoved(self, option):
return option.removed and option.removed <= self.version
for option in filter(lambda o: isinstance(o, Option), self.option_list):
if option.required and not seen(self, option):
raise KickstartValueError, formatErrorMsg(self.lineno, _("Option %s is required") % option)
elif seen(self, option) and usedTooNew(self, option):
mapping = {"option": option, "intro": versionToString(option.introduced),
"version": versionToString(self.version)}
self.error(_("The %(option)s option was introduced in version %(intro)s, but you are using kickstart syntax version %(version)s.") % mapping)
elif seen(self, option) and usedRemoved(self, option):
mapping = {"option": option, "removed": versionToString(option.removed),
"version": versionToString(self.version)}
if option.removed == self.version:
self.error(_("The %(option)s option is no longer supported.") % mapping)
else:
self.error(_("The %(option)s option was removed in version %(removed)s, but you are using kickstart syntax version %(version)s.") % mapping)
elif seen(self, option) and usedDeprecated(self, option):
mapping = {"lineno": self.lineno, "option": option}
warnings.warn(_("Ignoring deprecated option on line %(lineno)s: The %(option)s option has been deprecated and no longer has any effect. It may be removed from future releases, which will result in a fatal error from kickstart. Please modify your kickstart file to remove this option.") % mapping, DeprecationWarning)
return (values, args)
def parse_args(self, *args, **kwargs):
if kwargs.has_key("lineno"):
self.lineno = kwargs.pop("lineno")
return OptionParser.parse_args(self, **kwargs)
def __init__(self, mapping=None, version=None):
"""Create a new KSOptionParser instance. Each KickstartCommand
subclass should create one instance of KSOptionParser, providing
at least the lineno attribute. mapping and version are not required.
Instance attributes:
mapping -- A mapping from option strings to different values.
version -- The version of the kickstart syntax we are checking
against.
"""
OptionParser.__init__(self, option_class=KSOption,
add_help_option=False,
conflict_handler="resolve")
if mapping is None:
self.map = {}
else:
self.map = mapping
self.lineno = None
self.option_seen = {}
self.version = version
def _check_ksboolean(option, opt, value):
if value.lower() in ("on", "yes", "true", "1"):
return True
elif value.lower() in ("off", "no", "false", "0"):
return False
else:
mapping = {"opt": opt, "value": value}
raise OptionValueError(_("Option %(opt)s: invalid boolean value: %(value)r") % mapping)
def _check_string(option, opt, value):
if len(value) > 2 and value.startswith("--"):
mapping = {"opt": opt, "value": value}
raise OptionValueError(_("Option %(opt)s: invalid string value: %(value)r") % mapping)
else:
return value
# Creates a new Option class that supports several new attributes:
# - required: any option with this attribute must be supplied or an exception
# is thrown
# - introduced: the kickstart syntax version that this option first appeared
# in - an exception will be raised if the option is used and
# the specified syntax version is less than the value of this
# attribute
# - deprecated: the kickstart syntax version that this option was deprecated
# in - a DeprecationWarning will be thrown if the option is
# used and the specified syntax version is greater than the
# value of this attribute
# - removed: the kickstart syntax version that this option was removed in - an
# exception will be raised if the option is used and the specified
# syntax version is greater than the value of this attribute
# Also creates a new type:
# - ksboolean: support various kinds of boolean values on an option
# And two new actions:
# - map : allows you to define an opt -> val mapping such that dest gets val
# when opt is seen
# - map_extend: allows you to define an opt -> [val1, ... valn] mapping such
# that dest gets a list of vals built up when opt is seen
class KSOption (Option):
ATTRS = Option.ATTRS + ['introduced', 'deprecated', 'removed', 'required']
ACTIONS = Option.ACTIONS + ("map", "map_extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("map", "map_extend",)
TYPES = Option.TYPES + ("ksboolean", "string")
TYPE_CHECKER = copy(Option.TYPE_CHECKER)
TYPE_CHECKER["ksboolean"] = _check_ksboolean
TYPE_CHECKER["string"] = _check_string
def _check_required(self):
if self.required and not self.takes_value():
raise OptionError(_("Required flag set for option that doesn't take a value"), self)
# Make sure _check_required() is called from the constructor!
CHECK_METHODS = Option.CHECK_METHODS + [_check_required]
def process (self, opt, value, values, parser):
Option.process(self, opt, value, values, parser)
parser.option_seen[self] = 1
# Override default take_action method to handle our custom actions.
def take_action(self, action, dest, opt, value, values, parser):
if action == "map":
values.ensure_value(dest, parser.map[opt.lstrip('-')])
elif action == "map_extend":
values.ensure_value(dest, []).extend(parser.map[opt.lstrip('-')])
else:
Option.take_action(self, action, dest, opt, value, values, parser)
def takes_value(self):
# Deprecated options don't take a value.
return Option.takes_value(self) and not self.deprecated
def __init__(self, *args, **kwargs):
self.deprecated = False
self.required = False
Option.__init__(self, *args, **kwargs)
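# Illustrative sketch (not part of upstream pykickstart): exercising the
# custom option machinery defined above.  The option names --enabled, --fast
# and --name are hypothetical; real kickstart commands define their own.
if __name__ == "__main__":
    op = KSOptionParser(mapping={"fast": 100}, version=DEVEL)
    # ksboolean accepts on/off, yes/no, true/false, 1/0
    op.add_option("--enabled", dest="enabled", type="ksboolean")
    # the "map" action stores the value looked up in the mapping above
    op.add_option("--fast", dest="speed", action="map")
    # required options raise KickstartValueError when missing
    op.add_option("--name", dest="name", type="string", required=1)
    (opts, extra) = op.parse_args(args=["--enabled=yes", "--fast", "--name=demo"],
                                  lineno=1)
    print opts.enabled, opts.speed, opts.name   # True 100 demo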
########NEW FILE########
__FILENAME__ = parser
#
# parser.py: Kickstart file parser.
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007, 2008, 2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Main kickstart file processing module.
This module exports several important classes:
Script - Representation of a single %pre, %post, or %traceback script.
Packages - Representation of the %packages section.
KickstartParser - The kickstart file parser state machine.
"""
from collections import Iterator
import os
import shlex
import sys
import tempfile
from copy import copy
from optparse import *
from urlgrabber import urlread, urlopen
import urlgrabber.grabber as grabber
import constants
from errors import KickstartError, KickstartParseError, KickstartValueError, formatErrorMsg
from ko import KickstartObject
from sections import *
import version
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
STATE_END = "end"
STATE_COMMANDS = "commands"
ver = version.DEVEL
def _preprocessStateMachine (lineIter):
l = None
lineno = 0
# Now open an output kickstart file that we are going to write to one
# line at a time.
(outF, outName) = tempfile.mkstemp("-ks.cfg", "", "/tmp")
while True:
try:
l = lineIter.next()
except StopIteration:
break
# At the end of the file?
if l == "":
break
lineno += 1
url = None
ll = l.strip()
if not ll.startswith("%ksappend"):
os.write(outF, l)
continue
# Try to pull down the remote file.
try:
ksurl = ll.split(' ')[1]
except:
raise KickstartParseError, formatErrorMsg(lineno, msg=_("Illegal url for %%ksappend: %s") % ll)
try:
url = grabber.urlopen(ksurl)
except grabber.URLGrabError, e:
raise KickstartError, formatErrorMsg(lineno, msg=_("Unable to open %%ksappend file: %s") % e.strerror)
else:
# Sanity check the result.  Sometimes FTP doesn't notice that a
# file is missing.
try:
if url.size < 1:
raise KickstartError, formatErrorMsg(lineno, msg=_("Unable to open %%ksappend file"))
except:
raise KickstartError, formatErrorMsg(lineno, msg=_("Unable to open %%ksappend file"))
# If that worked, write the remote file to the output kickstart
# file in one burst. Then close everything up to get ready to
# read ahead in the input file. This allows multiple %ksappend
# lines to exist.
if url is not None:
os.write(outF, url.read())
url.close()
# All done - close the temp file and return its location.
os.close(outF)
return outName
def preprocessFromString (s):
"""Preprocess the kickstart file, provided as the string str. This
method is currently only useful for handling %ksappend lines,
which need to be fetched before the real kickstart parser can be
run. Returns the location of the complete kickstart file.
"""
i = iter(s.splitlines(True) + [""])
rc = _preprocessStateMachine (i)
return rc
def preprocessKickstart (f):
"""Preprocess the kickstart file, given by the filename file. This
method is currently only useful for handling %ksappend lines,
which need to be fetched before the real kickstart parser can be
run. Returns the location of the complete kickstart file.
"""
try:
fh = urlopen(f)
except grabber.URLGrabError, e:
raise KickstartError, formatErrorMsg(0, msg=_("Unable to open input kickstart file: %s") % e.strerror)
rc = _preprocessStateMachine (iter(fh.readlines()))
fh.close()
return rc
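# Usage sketch (illustrative): both preprocess helpers return the path of a
# temporary file that the caller is responsible for removing, e.g.:
#
#     path = preprocessFromString("lang en_US.UTF-8\nautopart\n")
#     try:
#         parser.readKickstart(path)     # parser: a KickstartParser instance
#     finally:
#         os.unlink(path)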
class PutBackIterator(Iterator):
def __init__(self, iterable):
self._iterable = iter(iterable)
self._buf = None
def __iter__(self):
return self
def put(self, s):
self._buf = s
def next(self):
if self._buf:
retval = self._buf
self._buf = None
return retval
else:
return self._iterable.next()
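# Example (illustrative): the put-back behavior lets _readSection push a
# section header back onto the stream for the main state machine to re-read:
#
#     it = PutBackIterator(["%pre\n", "ls\n"])
#     it.next()        # -> "%pre\n"
#     it.put("%pre\n")
#     it.next()        # -> "%pre\n" again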
###
### SCRIPT HANDLING
###
class Script(KickstartObject):
"""A class representing a single kickstart script. If functionality beyond
just a data representation is needed (for example, a run method in
anaconda), Script may be subclassed. Although a run method is not
provided, most of the attributes of Script have to do with running the
script. Instances of Script are held in a list by the Version object.
"""
def __init__(self, script, *args, **kwargs):
"""Create a new Script instance. Instance attributes:
errorOnFail -- If execution of the script fails, should anaconda
stop, display an error, and then reboot without
running any other scripts?
inChroot -- Does the script execute in anaconda's chroot
environment or not?
interp -- The program that should be used to interpret this
script.
lineno -- The line number this script starts on.
logfile -- Where all messages from the script should be logged.
script -- A string containing all the lines of the script.
type -- The type of the script, which can be KS_SCRIPT_* from
pykickstart.constants.
"""
KickstartObject.__init__(self, *args, **kwargs)
self.script = "".join(script)
self.interp = kwargs.get("interp", "/bin/sh")
self.inChroot = kwargs.get("inChroot", False)
self.lineno = kwargs.get("lineno", None)
self.logfile = kwargs.get("logfile", None)
self.errorOnFail = kwargs.get("errorOnFail", False)
self.type = kwargs.get("type", constants.KS_SCRIPT_PRE)
def __str__(self):
"""Return a string formatted for output to a kickstart file."""
retval = ""
if self.type == constants.KS_SCRIPT_PRE:
retval += '\n%pre'
elif self.type == constants.KS_SCRIPT_POST:
retval += '\n%post'
elif self.type == constants.KS_SCRIPT_TRACEBACK:
retval += '\n%traceback'
if self.interp != "/bin/sh" and self.interp != "":
retval += " --interpreter=%s" % self.interp
if self.type == constants.KS_SCRIPT_POST and not self.inChroot:
retval += " --nochroot"
if self.logfile != None:
retval += " --logfile %s" % self.logfile
if self.errorOnFail:
retval += " --erroronfail"
if self.script.endswith("\n"):
if ver >= version.F8:
return retval + "\n%s%%end\n" % self.script
else:
return retval + "\n%s\n" % self.script
else:
if ver >= version.F8:
return retval + "\n%s\n%%end\n" % self.script
else:
return retval + "\n%s\n" % self.script
##
## PACKAGE HANDLING
##
class Group:
"""A class representing a single group in the %packages section."""
def __init__(self, name="", include=constants.GROUP_DEFAULT):
"""Create a new Group instance. Instance attributes:
name -- The group's identifier
include -- The level of how much of the group should be included.
Values can be GROUP_* from pykickstart.constants.
"""
self.name = name
self.include = include
def __str__(self):
"""Return a string formatted for output to a kickstart file."""
if self.include == constants.GROUP_REQUIRED:
return "@%s --nodefaults" % self.name
elif self.include == constants.GROUP_ALL:
return "@%s --optional" % self.name
else:
return "@%s" % self.name
def __cmp__(self, other):
if self.name < other.name:
return -1
elif self.name > other.name:
return 1
return 0
class Packages(KickstartObject):
"""A class representing the %packages section of the kickstart file."""
def __init__(self, *args, **kwargs):
"""Create a new Packages instance. Instance attributes:
addBase -- Should the Base group be installed even if it is
not specified?
default -- Should the default package set be selected?
excludedList -- A list of all the packages marked for exclusion in
the %packages section, without the leading minus
symbol.
excludeDocs -- Should documentation in each package be excluded?
groupList -- A list of Group objects representing all the groups
specified in the %packages section. Names will be
stripped of the leading @ symbol.
excludedGroupList -- A list of Group objects representing all the
groups specified for removal in the %packages
section. Names will be stripped of the leading
-@ symbols.
handleMissing -- If unknown packages are specified in the %packages
section, should it be ignored or not? Values can
be KS_MISSING_* from pykickstart.constants.
packageList -- A list of all the packages specified in the
%packages section.
instLangs -- A list of languages to install.
"""
KickstartObject.__init__(self, *args, **kwargs)
self.addBase = True
self.default = False
self.excludedList = []
self.excludedGroupList = []
self.excludeDocs = False
self.groupList = []
self.handleMissing = constants.KS_MISSING_PROMPT
self.packageList = []
self.instLangs = None
def __str__(self):
"""Return a string formatted for output to a kickstart file."""
pkgs = ""
if not self.default:
grps = self.groupList
grps.sort()
for grp in grps:
pkgs += "%s\n" % grp.__str__()
p = self.packageList
p.sort()
for pkg in p:
pkgs += "%s\n" % pkg
grps = self.excludedGroupList
grps.sort()
for grp in grps:
pkgs += "-%s\n" % grp.__str__()
p = self.excludedList
p.sort()
for pkg in p:
pkgs += "-%s\n" % pkg
if pkgs == "":
return ""
retval = "\n%packages"
if self.default:
retval += " --default"
if self.excludeDocs:
retval += " --excludedocs"
if not self.addBase:
retval += " --nobase"
if self.handleMissing == constants.KS_MISSING_IGNORE:
retval += " --ignoremissing"
if self.instLangs:
retval += " --instLangs=%s" % self.instLangs
if ver >= version.F8:
return retval + "\n" + pkgs + "\n%end\n"
else:
return retval + "\n" + pkgs + "\n"
def _processGroup (self, line):
op = OptionParser()
op.add_option("--nodefaults", action="store_true", default=False)
op.add_option("--optional", action="store_true", default=False)
(opts, extra) = op.parse_args(args=line.split())
if opts.nodefaults and opts.optional:
raise KickstartValueError, _("Group cannot specify both --nodefaults and --optional")
# If the group name has spaces in it, we have to put it back together
# now.
grp = " ".join(extra)
if opts.nodefaults:
self.groupList.append(Group(name=grp, include=constants.GROUP_REQUIRED))
elif opts.optional:
self.groupList.append(Group(name=grp, include=constants.GROUP_ALL))
else:
self.groupList.append(Group(name=grp, include=constants.GROUP_DEFAULT))
def add (self, pkgList):
"""Given a list of lines from the input file, strip off any leading
symbols and add the result to the appropriate list.
"""
existingExcludedSet = set(self.excludedList)
existingPackageSet = set(self.packageList)
newExcludedSet = set()
newPackageSet = set()
excludedGroupList = []
for pkg in pkgList:
stripped = pkg.strip()
if stripped[0] == "@":
self._processGroup(stripped[1:])
elif stripped[0] == "-":
if stripped[1] == "@":
excludedGroupList.append(Group(name=stripped[2:]))
else:
newExcludedSet.add(stripped[1:])
else:
newPackageSet.add(stripped)
# Groups have to be excluded in two different ways (note: can't use
# sets here because we have to store objects):
excludedGroupNames = map(lambda g: g.name, excludedGroupList)
# First, an excluded group may be cancelling out a previously given
# one. This is often the case when using %include. So there we should
# just remove the group from the list.
self.groupList = filter(lambda g: g.name not in excludedGroupNames, self.groupList)
# Second, the package list could have included globs which are not
# processed by pykickstart. In that case we need to preserve a list of
# excluded groups so whatever tool doing package/group installation can
# take appropriate action.
self.excludedGroupList.extend(excludedGroupList)
existingPackageSet = (existingPackageSet - newExcludedSet) | newPackageSet
existingExcludedSet = (existingExcludedSet - existingPackageSet) | newExcludedSet
self.packageList = list(existingPackageSet)
self.excludedList = list(existingExcludedSet)
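# Illustrative sketch of how add() sorts incoming lines (package and group
# names here are made up):
#
#     pkgs = Packages()
#     pkgs.add(["@core", "-@gnome-desktop", "vim-enhanced", "-emacs"])
#     # pkgs.groupList         -> [Group("core")]
#     # pkgs.excludedGroupList -> [Group("gnome-desktop")]
#     # pkgs.packageList       -> ["vim-enhanced"]
#     # pkgs.excludedList      -> ["emacs"]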
###
### PARSER
###
class KickstartParser:
"""The kickstart file parser class as represented by a basic state
machine. To create a specialized parser, make a subclass and override
any of the methods you care about. Methods that don't need to do
anything may just pass. However, _stateMachine should never be
overridden.
"""
def __init__ (self, handler, followIncludes=True, errorsAreFatal=True,
missingIncludeIsFatal=True):
"""Create a new KickstartParser instance. Instance attributes:
errorsAreFatal -- Should errors cause processing to halt, or
just print a message to the screen? This
is most useful for writing syntax checkers
that may want to continue after an error is
encountered.
followIncludes -- If %include is seen, should the included
file be checked as well or skipped?
handler -- An instance of a BaseHandler subclass. If
None, the input file will still be parsed
but no data will be saved and no commands
will be executed.
missingIncludeIsFatal -- Should missing include files be fatal, even
if errorsAreFatal is False?
"""
self.errorsAreFatal = errorsAreFatal
self.followIncludes = followIncludes
self.handler = handler
self.currentdir = {}
self.missingIncludeIsFatal = missingIncludeIsFatal
self._state = STATE_COMMANDS
self._includeDepth = 0
self._line = ""
self.version = self.handler.version
global ver
ver = self.version
self._sections = {}
self.setupSections()
def _reset(self):
"""Reset the internal variables of the state machine for a new kickstart file."""
self._state = STATE_COMMANDS
self._includeDepth = 0
def getSection(self, s):
"""Return a reference to the requested section (s must start with '%'s),
or raise KeyError if not found.
"""
return self._sections[s]
def handleCommand (self, lineno, args):
"""Given the list of command and arguments, call the Version's
dispatcher method to handle the command. Returns the command or
data object returned by the dispatcher. This method may be
overridden in a subclass if necessary.
"""
if self.handler:
self.handler.currentCmd = args[0]
self.handler.currentLine = self._line
retval = self.handler.dispatcher(args, lineno)
return retval
def registerSection(self, obj):
"""Given an instance of a Section subclass, register the new section
with the parser. Calling this method means the parser will
recognize your new section and dispatch into the given object to
handle it.
"""
if not obj.sectionOpen:
raise TypeError, "no sectionOpen given for section %s" % obj
if not obj.sectionOpen.startswith("%"):
raise TypeError, "section %s tag does not start with a %%" % obj.sectionOpen
self._sections[obj.sectionOpen] = obj
def _finalize(self, obj):
"""Called at the close of a kickstart section to take any required
actions. Internally, this is used to add scripts once we have the
whole body read.
"""
obj.finalize()
self._state = STATE_COMMANDS
def _handleSpecialComments(self, line):
"""Kickstart recognizes a couple special comments."""
if self._state != STATE_COMMANDS:
return
# Save the platform for s-c-kickstart.
if line[:10] == "#platform=":
self.handler.platform = self._line[11:]
def _readSection(self, lineIter, lineno):
obj = self._sections[self._state]
while True:
try:
line = lineIter.next()
if line == "":
# This section ends at the end of the file.
if self.version >= version.F8:
raise KickstartParseError, formatErrorMsg(lineno, msg=_("Section does not end with %%end."))
self._finalize(obj)
except StopIteration:
break
lineno += 1
# Throw away blank lines and comments, unless the section wants all
# lines.
if self._isBlankOrComment(line) and not obj.allLines:
continue
if line.startswith("%"):
args = shlex.split(line)
if args and args[0] == "%end":
# This is a properly terminated section.
self._finalize(obj)
break
elif args and args[0] == "%ksappend":
continue
elif args and (self._validState(args[0]) or args[0] in ["%include", "%ksappend"]):
# This is an unterminated section.
if self.version >= version.F8:
raise KickstartParseError, formatErrorMsg(lineno, msg=_("Section does not end with %%end."))
# Finish up. We do not process the header here because
# kicking back out to STATE_COMMANDS will ensure that happens.
lineIter.put(line)
lineno -= 1
self._finalize(obj)
break
else:
# This is just a line within a section. Pass it off to whatever
# section handles it.
obj.handleLine(line)
return lineno
def _validState(self, st):
"""Is the given section tag one that has been registered with the parser?"""
return st in self._sections.keys()
def _tryFunc(self, fn):
"""Call the provided function (which doesn't take any arguments) and
do the appropriate error handling. If errorsAreFatal is False, this
function will just print the exception and keep going.
"""
try:
fn()
except Exception, msg:
if self.errorsAreFatal:
raise
else:
print msg
def _isBlankOrComment(self, line):
return line.isspace() or line == "" or line.lstrip()[0] == '#'
def _stateMachine(self, lineIter):
# For error reporting.
lineno = 0
while True:
# Get the next line out of the file, quitting if this is the last line.
try:
self._line = lineIter.next()
if self._line == "":
break
except StopIteration:
break
lineno += 1
# Eliminate blank lines, whitespace-only lines, and comments.
if self._isBlankOrComment(self._line):
self._handleSpecialComments(self._line)
continue
# Remove any end-of-line comments.
sanitized = self._line.split("#")[0]
# Then split the line.
args = shlex.split(sanitized.rstrip())
if args[0] == "%include":
# This case comes up primarily in ksvalidator.
if not self.followIncludes:
continue
if len(args) == 1 or not args[1]:
raise KickstartParseError, formatErrorMsg(lineno)
self._includeDepth += 1
try:
self.readKickstart(args[1], reset=False)
except KickstartError:
# Handle the include file being provided over the
# network in a %pre script. This case comes up in the
# early parsing in anaconda.
if self.missingIncludeIsFatal:
raise
self._includeDepth -= 1
continue
# Now on to the main event.
if self._state == STATE_COMMANDS:
if args[0] == "%ksappend":
# This is handled by the preprocess* functions, so continue.
continue
elif args[0][0] == '%':
# This is the beginning of a new section. Handle its header
# here.
newSection = args[0]
if not self._validState(newSection):
raise KickstartParseError, formatErrorMsg(lineno, msg=_("Unknown kickstart section: %s" % newSection))
self._state = newSection
obj = self._sections[self._state]
self._tryFunc(lambda: obj.handleHeader(lineno, args))
# This will handle all section processing, kicking us back
# out to STATE_COMMANDS at the end with the current line
# being the next section header, etc.
lineno = self._readSection(lineIter, lineno)
else:
# This is a command in the command section. Dispatch to it.
self._tryFunc(lambda: self.handleCommand(lineno, args))
elif self._state == STATE_END:
break
def readKickstartFromString (self, s, reset=True):
"""Process a kickstart file, provided as the string str."""
if reset:
self._reset()
# Add a "" to the end of the list so the string reader acts like the
# file reader and we only get StopIteration when we're after the final
# line of input.
i = PutBackIterator(s.splitlines(True) + [""])
self._stateMachine (i)
def readKickstart(self, f, reset=True):
"""Process a kickstart file, given by the filename f."""
if reset:
self._reset()
# An %include might not specify a full path.  If we don't try to figure
# out what the path should have been, we're unable to find the file, but
# requiring a full path specification would be unfriendly.  So make the
# reading "smart" by keeping track of what the current directory is at
# each include depth.
if not os.path.exists(f):
if self.currentdir.has_key(self._includeDepth - 1):
if os.path.exists(os.path.join(self.currentdir[self._includeDepth - 1], f)):
f = os.path.join(self.currentdir[self._includeDepth - 1], f)
cd = os.path.dirname(f)
if not cd.startswith("/"):
cd = os.path.abspath(cd)
self.currentdir[self._includeDepth] = cd
try:
s = urlread(f)
except grabber.URLGrabError, e:
raise KickstartError, formatErrorMsg(0, msg=_("Unable to open input kickstart file: %s") % e.strerror)
self.readKickstartFromString(s, reset=False)
def setupSections(self):
"""Install the sections all kickstart files support. You may override
this method in a subclass, but should avoid doing so unless you know
what you're doing.
"""
self._sections = {}
# Install the sections all kickstart files support.
self.registerSection(PreScriptSection(self.handler, dataObj=Script))
self.registerSection(PostScriptSection(self.handler, dataObj=Script))
self.registerSection(TracebackScriptSection(self.handler, dataObj=Script))
self.registerSection(PackageSection(self.handler))
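# Minimal end-to-end sketch (illustrative, not upstream code).  makeVersion()
# lives in pykickstart.version and returns a handler for the newest syntax
# version; everything else below is the public API defined in this module.
if __name__ == "__main__":
    from pykickstart.version import makeVersion
    handler = makeVersion()
    parser = KickstartParser(handler)
    parser.readKickstartFromString("lang en_US.UTF-8\n\n%post\necho hi\n%end\n")
    print handler.lang.lang      # en_US.UTF-8
    print len(handler.scripts)   # 1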
########NEW FILE########
__FILENAME__ = sections
#
# sections.py: Kickstart file sections.
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
This module exports the classes that define a section of a kickstart file. A
section is a chunk of the file starting with a %tag and ending with a %end.
Examples of sections include %packages, %pre, and %post.
You may use this module to define your own custom sections which will be
treated just the same as a predefined one by the kickstart parser. All that
is necessary is to create a new subclass of Section and call
parser.registerSection with an instance of your new class.
"""
from constants import *
from options import KSOptionParser
from version import *
class Section(object):
"""The base class for defining kickstart sections. You are free to
subclass this as appropriate.
Class attributes:
allLines -- Does this section require the parser to call handleLine
for every line in the section, even blanks and comments?
sectionOpen -- The string that denotes the start of this section. You
must start your tag with a percent sign.
timesSeen -- This attribute is for informational purposes only. It is
incremented every time handleHeader is called to keep
track of the number of times a section of this type is
seen.
"""
allLines = False
sectionOpen = ""
timesSeen = 0
def __init__(self, handler, **kwargs):
"""Create a new Script instance. At the least, you must pass in an
instance of a baseHandler subclass.
Valid kwargs:
dataObj --
"""
self.handler = handler
self.version = self.handler.version
self.dataObj = kwargs.get("dataObj", None)
def finalize(self):
"""This method is called when the %end tag for a section is seen. It
is not required to be provided.
"""
pass
def handleLine(self, line):
"""This method is called for every line of a section. Take whatever
action is appropriate. While this method is not required to be
provided, not providing it does not make a whole lot of sense.
Arguments:
line -- The complete line, with any trailing newline.
"""
pass
def handleHeader(self, lineno, args):
"""This method is called when the opening tag for a section is seen.
Not all sections will need this method, though all provided with
kickstart include one.
Arguments:
args -- A list of all strings passed as arguments to the section
opening tag.
"""
self.timesSeen += 1
class NullSection(Section):
"""This defines a section that pykickstart will recognize but do nothing
with. If the parser runs across a %section that has no object registered,
it will raise an error. Sometimes, you may want to simply ignore those
sections instead. This class is useful for that purpose.
"""
def __init__(self, *args, **kwargs):
"""Create a new NullSection instance. You must pass a sectionOpen
parameter (including a leading '%') for the section you wish to
ignore.
"""
Section.__init__(self, *args, **kwargs)
self.sectionOpen = kwargs.get("sectionOpen")
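# Usage sketch (illustrative): have the parser silently skip a section it
# would otherwise reject (the %addon tag is just an example):
#
#     parser.registerSection(NullSection(handler, sectionOpen="%addon"))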
class ScriptSection(Section):
allLines = True
def __init__(self, *args, **kwargs):
Section.__init__(self, *args, **kwargs)
self._script = {}
self._resetScript()
def _getParser(self):
op = KSOptionParser(version=self.version)
op.add_option("--erroronfail", dest="errorOnFail", action="store_true",
default=False)
op.add_option("--interpreter", dest="interpreter", default="/bin/sh")
op.add_option("--log", "--logfile", dest="log")
return op
def _resetScript(self):
self._script = {"interp": "/bin/sh", "log": None, "errorOnFail": False,
"lineno": None, "chroot": False, "body": []}
def handleLine(self, line):
self._script["body"].append(line)
def finalize(self):
if " ".join(self._script["body"]).strip() == "":
return
kwargs = {"interp": self._script["interp"],
"inChroot": self._script["chroot"],
"lineno": self._script["lineno"],
"logfile": self._script["log"],
"errorOnFail": self._script["errorOnFail"],
"type": self._script["type"]}
s = self.dataObj (self._script["body"], **kwargs)
self._resetScript()
if self.handler:
self.handler.scripts.append(s)
def handleHeader(self, lineno, args):
"""Process the arguments to a %pre/%post/%traceback header for later
setting on a Script instance once the end of the script is found.
This method may be overridden in a subclass if necessary.
"""
Section.handleHeader(self, lineno, args)
op = self._getParser()
(opts, extra) = op.parse_args(args=args[1:], lineno=lineno)
self._script["interp"] = opts.interpreter
self._script["lineno"] = lineno
self._script["log"] = opts.log
self._script["errorOnFail"] = opts.errorOnFail
if hasattr(opts, "nochroot"):
self._script["chroot"] = not opts.nochroot
class PreScriptSection(ScriptSection):
sectionOpen = "%pre"
def _resetScript(self):
ScriptSection._resetScript(self)
self._script["type"] = KS_SCRIPT_PRE
class PostScriptSection(ScriptSection):
sectionOpen = "%post"
def _getParser(self):
op = ScriptSection._getParser(self)
op.add_option("--nochroot", dest="nochroot", action="store_true",
default=False)
return op
def _resetScript(self):
ScriptSection._resetScript(self)
self._script["chroot"] = True
self._script["type"] = KS_SCRIPT_POST
class TracebackScriptSection(ScriptSection):
sectionOpen = "%traceback"
def _resetScript(self):
ScriptSection._resetScript(self)
self._script["type"] = KS_SCRIPT_TRACEBACK
class PackageSection(Section):
sectionOpen = "%packages"
def handleLine(self, line):
if not self.handler:
return
(h, s, t) = line.partition('#')
line = h.rstrip()
self.handler.packages.add([line])
def handleHeader(self, lineno, args):
"""Process the arguments to the %packages header and set attributes
on the Version's Packages instance as appropriate. This method may be
overridden in a subclass if necessary.
"""
Section.handleHeader(self, lineno, args)
op = KSOptionParser(version=self.version)
op.add_option("--excludedocs", dest="excludedocs", action="store_true",
default=False)
op.add_option("--ignoremissing", dest="ignoremissing",
action="store_true", default=False)
op.add_option("--nobase", dest="nobase", action="store_true",
default=False)
op.add_option("--ignoredeps", dest="resolveDeps", action="store_false",
deprecated=FC4, removed=F9)
op.add_option("--resolvedeps", dest="resolveDeps", action="store_true",
deprecated=FC4, removed=F9)
op.add_option("--default", dest="defaultPackages", action="store_true",
default=False, introduced=F7)
op.add_option("--instLangs", dest="instLangs", type="string",
default="", introduced=F9)
(opts, extra) = op.parse_args(args=args[1:], lineno=lineno)
self.handler.packages.excludeDocs = opts.excludedocs
self.handler.packages.addBase = not opts.nobase
if opts.ignoremissing:
self.handler.packages.handleMissing = KS_MISSING_IGNORE
else:
self.handler.packages.handleMissing = KS_MISSING_PROMPT
if opts.defaultPackages:
self.handler.packages.default = True
if opts.instLangs:
self.handler.packages.instLangs = opts.instLangs
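# Illustrative sketch (not upstream code) of the custom-section workflow the
# module docstring describes.  %mysection and the handler attribute mydata
# are hypothetical names used only for this example.
#
#     class MySection(Section):
#         sectionOpen = "%mysection"
#
#         def handleLine(self, line):
#             self.handler.mydata.append(line.rstrip())
#
#     parser.registerSection(MySection(handler))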
########NEW FILE########
__FILENAME__ = byterange
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# $Id: byterange.py,v 1.12 2006/07/20 20:15:58 mstenner Exp $
import os
import stat
import urllib
import urllib2
import rfc822
DEBUG = None
try:
from cStringIO import StringIO
except ImportError, msg:
from StringIO import StringIO
class RangeError(IOError):
"""Error raised when an unsatisfiable range is requested."""
pass
class HTTPRangeHandler(urllib2.BaseHandler):
"""Handler that enables HTTP Range headers.
This was extremely simple.  The Range header is an HTTP feature to
begin with, so all this class does is tell urllib2 that the
"206 Partial Content" response from the HTTP server is what we
expected.
Example:
import urllib2
import byterange
range_handler = byterange.HTTPRangeHandler()
opener = urllib2.build_opener(range_handler)
# install it
urllib2.install_opener(opener)
# create Request and set Range header
req = urllib2.Request('http://www.python.org/')
req.add_header('Range', 'bytes=30-50')
f = urllib2.urlopen(req)
"""
def http_error_206(self, req, fp, code, msg, hdrs):
# 206 Partial Content Response
r = urllib.addinfourl(fp, hdrs, req.get_full_url())
r.code = code
r.msg = msg
return r
def http_error_416(self, req, fp, code, msg, hdrs):
# HTTP's Range Not Satisfiable error
raise RangeError('Requested Range Not Satisfiable')
class HTTPSRangeHandler(HTTPRangeHandler):
""" Range Header support for HTTPS. """
def https_error_206(self, req, fp, code, msg, hdrs):
return self.http_error_206(req, fp, code, msg, hdrs)
def https_error_416(self, req, fp, code, msg, hdrs):
        # delegate to the HTTP handler; calling https_error_416 here
        # would recurse forever
        return self.http_error_416(req, fp, code, msg, hdrs)
class RangeableFileObject:
"""File object wrapper to enable raw range handling.
    This was implemented primarily for handling range
specifications for file:// urls. This object effectively makes
a file object look like it consists only of a range of bytes in
the stream.
Examples:
# expose 10 bytes, starting at byte position 20, from
        # /etc/passwd.
>>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
# seek seeks within the range (to position 23 in this case)
>>> fo.seek(3)
        # tell tells where you are _within the range_ (position 3 in
# this case)
>>> fo.tell()
# read EOFs if an attempt is made to read past the last
# byte in the range. the following will return only 7 bytes.
>>> fo.read(30)
"""
def __init__(self, fo, rangetup):
"""Create a RangeableFileObject.
fo -- a file like object. only the read() method need be
supported but supporting an optimized seek() is
preferable.
rangetup -- a (firstbyte,lastbyte) tuple specifying the range
to work over.
The file object provided is assumed to be at byte offset 0.
"""
self.fo = fo
(self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
self.realpos = 0
self._do_seek(self.firstbyte)
def __getattr__(self, name):
"""This effectively allows us to wrap at the instance level.
Any attribute not found in _this_ object will be searched for
in self.fo. This includes methods."""
if hasattr(self.fo, name):
return getattr(self.fo, name)
raise AttributeError, name
def tell(self):
"""Return the position within the range.
This is different from fo.seek in that position 0 is the
first byte position of the range tuple. For example, if
this object was created with a range tuple of (500,899),
tell() will return 0 when at byte position 500 of the file.
"""
return (self.realpos - self.firstbyte)
def seek(self,offset,whence=0):
"""Seek within the byte range.
Positioning is identical to that described under tell().
"""
assert whence in (0, 1, 2)
if whence == 0: # absolute seek
realoffset = self.firstbyte + offset
elif whence == 1: # relative seek
realoffset = self.realpos + offset
elif whence == 2: # absolute from end of file
# XXX: are we raising the right Error here?
raise IOError('seek from end of file not supported.')
# do not allow seek past lastbyte in range
if self.lastbyte and (realoffset >= self.lastbyte):
realoffset = self.lastbyte
self._do_seek(realoffset - self.realpos)
def read(self, size=-1):
"""Read within the range.
This method will limit the size read based on the range.
"""
size = self._calc_read_size(size)
rslt = self.fo.read(size)
self.realpos += len(rslt)
return rslt
def readline(self, size=-1):
"""Read lines within the range.
This method will limit the size read based on the range.
"""
size = self._calc_read_size(size)
rslt = self.fo.readline(size)
self.realpos += len(rslt)
return rslt
def _calc_read_size(self, size):
"""Handles calculating the amount of data to read based on
the range.
"""
if self.lastbyte:
if size > -1:
if ((self.realpos + size) >= self.lastbyte):
size = (self.lastbyte - self.realpos)
else:
size = (self.lastbyte - self.realpos)
return size
def _do_seek(self,offset):
"""Seek based on whether wrapped object supports seek().
offset is relative to the current position (self.realpos).
"""
assert offset >= 0
if not hasattr(self.fo, 'seek'):
self._poor_mans_seek(offset)
else:
self.fo.seek(self.realpos + offset)
self.realpos+= offset
def _poor_mans_seek(self,offset):
"""Seek by calling the wrapped file objects read() method.
This is used for file like objects that do not have native
seek support. The wrapped objects read() method is called
to manually seek to the desired position.
offset -- read this number of bytes from the wrapped
file object.
raise RangeError if we encounter EOF before reaching the
specified offset.
"""
pos = 0
bufsize = 1024
while pos < offset:
if (pos + bufsize) > offset:
bufsize = offset - pos
buf = self.fo.read(bufsize)
if len(buf) != bufsize:
raise RangeError('Requested Range Not Satisfiable')
pos+= bufsize
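# A quick illustration (not in the original module) of the wrapper above,
# assuming /etc/hosts exists and is at least 15 bytes long:
#
#   fo = RangeableFileObject(open('/etc/hosts'), (5, 15))
#   data = fo.read()   # at most 10 bytes, taken from offsets 5..14
#   fo.tell()          # 10 -- positions are relative to the range start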
class FileRangeHandler(urllib2.FileHandler):
"""FileHandler subclass that adds Range support.
This class handles Range headers exactly like an HTTP
server would.
"""
def open_local_file(self, req):
import mimetypes
import mimetools
host = req.get_host()
file = req.get_selector()
localfile = urllib.url2pathname(file)
stats = os.stat(localfile)
size = stats[stat.ST_SIZE]
modified = rfc822.formatdate(stats[stat.ST_MTIME])
mtype = mimetypes.guess_type(file)[0]
if host:
host, port = urllib.splitport(host)
if port or socket.gethostbyname(host) not in self.get_names():
raise urllib2.URLError('file not on local host')
fo = open(localfile,'rb')
brange = req.headers.get('Range',None)
brange = range_header_to_tuple(brange)
assert brange != ()
if brange:
(fb,lb) = brange
if lb == '': lb = size
if fb < 0 or fb > size or lb > size:
raise RangeError('Requested Range Not Satisfiable')
size = (lb - fb)
fo = RangeableFileObject(fo, (fb,lb))
headers = mimetools.Message(StringIO(
'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified)))
return urllib.addinfourl(fo, headers, 'file:'+file)
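# Usage sketch (not in the original): with FileRangeHandler installed,
# file:// URLs honor Range headers much like an HTTP server would:
#
#   opener = urllib2.build_opener(FileRangeHandler())
#   req = urllib2.Request('file:///etc/hosts')
#   req.add_header('Range', 'bytes=0-9')
#   print opener.open(req).read()   # first 10 bytes of the file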
# FTP Range Support
# Unfortunately, a large amount of base FTP code had to be copied
# from urllib and urllib2 in order to insert the FTP REST command.
# Code modifications for range support have been commented as
# follows:
# -- range support modifications start/end here
from urllib import splitport, splituser, splitpasswd, splitattr, \
unquote, addclosehook, addinfourl
import ftplib
import socket
import sys
import mimetypes
import mimetools
class FTPRangeHandler(urllib2.FTPHandler):
def ftp_open(self, req):
host = req.get_host()
if not host:
raise IOError, ('ftp error', 'no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
# username/password handling
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = unquote(user or '')
passwd = unquote(passwd or '')
try:
host = socket.gethostbyname(host)
except socket.error, msg:
raise urllib2.URLError(msg)
path, attrs = splitattr(req.get_selector())
dirs = path.split('/')
dirs = map(unquote, dirs)
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitattr(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
# -- range support modifications start here
rest = None
range_tup = range_header_to_tuple(req.headers.get('Range',None))
assert range_tup != ()
if range_tup:
(fb,lb) = range_tup
if fb > 0: rest = fb
# -- range support modifications end here
fp, retrlen = fw.retrfile(file, type, rest)
# -- range support modifications start here
if range_tup:
(fb,lb) = range_tup
if lb == '':
if retrlen is None or retrlen == 0:
raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
lb = retrlen
retrlen = lb - fb
if retrlen < 0:
# beginning of range is larger than file
raise RangeError('Requested Range Not Satisfiable')
else:
retrlen = lb - fb
fp = RangeableFileObject(fp, (0,retrlen))
# -- range support modifications end here
headers = ""
mtype = mimetypes.guess_type(req.get_full_url())[0]
if mtype:
headers += "Content-Type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-Length: %d\n" % retrlen
sf = StringIO(headers)
headers = mimetools.Message(sf)
return addinfourl(fp, headers, req.get_full_url())
except ftplib.all_errors, msg:
raise IOError, ('ftp error', msg), sys.exc_info()[2]
def connect_ftp(self, user, passwd, host, port, dirs):
fw = ftpwrapper(user, passwd, host, port, dirs)
return fw
class ftpwrapper(urllib.ftpwrapper):
# range support note:
# this ftpwrapper code is copied directly from
# urllib. The only enhancement is to add the rest
# argument and pass it on to ftp.ntransfercmd
def retrfile(self, file, type, rest=None):
self.endtransfer()
if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
else: cmd = 'TYPE ' + type; isdir = 0
try:
self.ftp.voidcmd(cmd)
except ftplib.all_errors:
self.init()
self.ftp.voidcmd(cmd)
conn = None
if file and not isdir:
# Use nlst to see if the file exists at all
try:
self.ftp.nlst(file)
except ftplib.error_perm, reason:
raise IOError, ('ftp error', reason), sys.exc_info()[2]
# Restore the transfer mode!
self.ftp.voidcmd(cmd)
# Try to retrieve as a file
try:
cmd = 'RETR ' + file
conn = self.ftp.ntransfercmd(cmd, rest)
except ftplib.error_perm, reason:
if str(reason)[:3] == '501':
# workaround for REST not supported error
fp, retrlen = self.retrfile(file, type)
fp = RangeableFileObject(fp, (rest,''))
return (fp, retrlen)
elif str(reason)[:3] != '550':
raise IOError, ('ftp error', reason), sys.exc_info()[2]
if not conn:
# Set transfer mode to ASCII!
self.ftp.voidcmd('TYPE A')
# Try a directory listing
if file: cmd = 'LIST ' + file
else: cmd = 'LIST'
conn = self.ftp.ntransfercmd(cmd)
self.busy = 1
# Pass back both a suitably decorated object and a retrieval length
return (addclosehook(conn[0].makefile('rb'),
self.endtransfer), conn[1])
####################################################################
# Range Tuple Functions
# XXX: These range tuple functions might go better in a class.
_rangere = None
def range_header_to_tuple(range_header):
"""Get a (firstbyte,lastbyte) tuple from a Range header value.
Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
function pulls the firstbyte and lastbyte values and returns
a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
the header value, it is returned as an empty string in the
tuple.
Return None if range_header is None
Return () if range_header does not conform to the range spec
pattern.
"""
global _rangere
if range_header is None: return None
if _rangere is None:
import re
_rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
match = _rangere.match(range_header)
if match:
tup = range_tuple_normalize(match.group(1,2))
if tup and tup[1]:
tup = (tup[0],tup[1]+1)
return tup
return ()
def range_tuple_to_header(range_tup):
"""Convert a range tuple to a Range header value.
Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
if no range is needed.
"""
if range_tup is None: return None
range_tup = range_tuple_normalize(range_tup)
if range_tup:
if range_tup[1]:
range_tup = (range_tup[0],range_tup[1] - 1)
return 'bytes=%s-%s' % range_tup
def range_tuple_normalize(range_tup):
"""Normalize a (first_byte,last_byte) range tuple.
Return a tuple whose first element is guaranteed to be an int
and whose second element will be '' (meaning: the last byte) or
an int. Finally, return None if the normalized tuple == (0,'')
    as that is equivalent to retrieving the entire file.
"""
if range_tup is None: return None
# handle first byte
fb = range_tup[0]
if fb in (None,''): fb = 0
else: fb = int(fb)
# handle last byte
try: lb = range_tup[1]
except IndexError: lb = ''
else:
if lb is None: lb = ''
elif lb != '': lb = int(lb)
# check if range is over the entire file
if (fb,lb) == (0,''): return None
# check that the range is valid
if lb < fb: raise RangeError('Invalid byte range: %s-%s' % (fb,lb))
return (fb,lb)
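# A small self-check (added as a sketch; not in the original module) that
# exercises the pure helper functions above.  Note the half-open internal
# convention: the header "bytes=0-99" becomes the tuple (0, 100).
if __name__ == '__main__':
    assert range_header_to_tuple('bytes=0-99') == (0, 100)
    assert range_header_to_tuple('bytes=500-') == (500, '')
    assert range_header_to_tuple(None) is None
    assert range_header_to_tuple('junk') == ()
    # and back again: the last byte is re-inclusive in the header form
    assert range_tuple_to_header((0, 100)) == 'bytes=0-99'
    print 'range tuple helpers OK'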
########NEW FILE########
__FILENAME__ = grabber
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
"""A high-level cross-protocol url-grabber.
GENERAL ARGUMENTS (kwargs)
Where possible, the module-level default is indicated, and legal
values are provided.
copy_local = 0 [0|1]
ignored except for file:// urls, in which case it specifies
whether urlgrab should still make a copy of the file, or simply
point to the existing copy. The module level default for this
option is 0.
close_connection = 0 [0|1]
tells URLGrabber to close the connection after a file has been
    transferred.  This is ignored unless the download happens with the
http keepalive handler (keepalive=1). Otherwise, the connection
is left open for further use. The module level default for this
option is 0 (keepalive connections will not be closed).
keepalive = 1 [0|1]
specifies whether keepalive should be used for HTTP/1.1 servers
that support it. The module level default for this option is 1
(keepalive is enabled).
progress_obj = None
a class instance that supports the following methods:
po.start(filename, url, basename, length, text)
# length will be None if unknown
po.update(read) # read == bytes read so far
po.end()
text = None
    specifies an alternative text item at the beginning of the progress
bar line. If not given, the basename of the file is used.
throttle = 1.0
a number - if it's an int, it's the bytes/second throttle limit.
If it's a float, it is first multiplied by bandwidth. If throttle
== 0, throttling is disabled. If None, the module-level default
(which can be set on default_grabber.throttle) is used. See
BANDWIDTH THROTTLING for more information.
timeout = None
a positive float expressing the number of seconds to wait for socket
operations. If the value is None or 0.0, socket operations will block
forever. Setting this option causes urlgrabber to call the settimeout
method on the Socket object used for the request. See the Python
documentation on settimeout for more information.
http://www.python.org/doc/current/lib/socket-objects.html
bandwidth = 0
the nominal max bandwidth in bytes/second. If throttle is a float
and bandwidth == 0, throttling is disabled. If None, the
module-level default (which can be set on
default_grabber.bandwidth) is used. See BANDWIDTH THROTTLING for
more information.
range = None
a tuple of the form (first_byte, last_byte) describing a byte
    range to retrieve.  Either or both of the values may be set to
None. If first_byte is None, byte offset 0 is assumed. If
last_byte is None, the last byte available is assumed. Note that
    the range specification is python-like in that (0,10) will yield
the first 10 bytes of the file.
If set to None, no range will be used.
reget = None [None|'simple'|'check_timestamp']
whether to attempt to reget a partially-downloaded file. Reget
only applies to .urlgrab and (obviously) only if there is a
partially downloaded file. Reget has two modes:
'simple' -- the local file will always be trusted. If there
are 100 bytes in the local file, then the download will always
begin 100 bytes into the requested file.
'check_timestamp' -- the timestamp of the server file will be
compared to the timestamp of the local file. ONLY if the
local file is newer than or the same age as the server file
will reget be used. If the server file is newer, or the
timestamp is not returned, the entire file will be fetched.
NOTE: urlgrabber can do very little to verify that the partial
file on disk is identical to the beginning of the remote file.
You may want to either employ a custom "checkfunc" or simply avoid
using reget in situations where corruption is a concern.
user_agent = 'urlgrabber/VERSION'
a string, usually of the form 'AGENT/VERSION' that is provided to
HTTP servers in the User-agent header. The module level default
for this option is "urlgrabber/VERSION".
http_headers = None
a tuple of 2-tuples, each containing a header and value. These
will be used for http and https requests only. For example, you
can do
http_headers = (('Pragma', 'no-cache'),)
ftp_headers = None
this is just like http_headers, but will be used for ftp requests.
proxies = None
a dictionary that maps protocol schemes to proxy hosts. For
example, to use a proxy server on host "foo" port 3128 for http
and https URLs:
proxies={ 'http' : 'http://foo:3128', 'https' : 'http://foo:3128' }
note that proxy authentication information may be provided using
normal URL constructs:
proxies={ 'http' : 'http://user:host@foo:3128' }
Lastly, if proxies is None, the default environment settings will
be used.
prefix = None
a url prefix that will be prepended to all requested urls. For
example:
g = URLGrabber(prefix='http://foo.com/mirror/')
g.urlgrab('some/file.txt')
## this will fetch 'http://foo.com/mirror/some/file.txt'
This option exists primarily to allow identical behavior to
MirrorGroup (and derived) instances. Note: a '/' will be inserted
if necessary, so you cannot specify a prefix that ends with a
partial file or directory name.
opener = None
Overrides the default urllib2.OpenerDirector provided to urllib2
when making requests. This option exists so that the urllib2
handler chain may be customized. Note that the range, reget,
proxy, and keepalive features require that custom handlers be
provided to urllib2 in order to function properly. If an opener
option is provided, no attempt is made by urlgrabber to ensure
chain integrity. You are responsible for ensuring that any
extension handlers are present if said features are required.
data = None
Only relevant for the HTTP family (and ignored for other
protocols), this allows HTTP POSTs. When the data kwarg is
present (and not None), an HTTP request will automatically become
a POST rather than GET. This is done by direct passthrough to
urllib2. If you use this, you may also want to set the
'Content-length' and 'Content-type' headers with the http_headers
    option.  Note that python 2.2 handles the case of these headers
    badly, and if you do not use the proper case (shown here), your
values will be overridden with the defaults.
RETRY RELATED ARGUMENTS
retry = None
the number of times to retry the grab before bailing. If this is
zero, it will retry forever. This was intentional... really, it
was :). If this value is not supplied or is supplied but is None
retrying does not occur.
retrycodes = [-1,2,4,5,6,7]
a sequence of errorcodes (values of e.errno) for which it should
retry. See the doc on URLGrabError for more details on this. You
might consider modifying a copy of the default codes rather than
building yours from scratch so that if the list is extended in the
future (or one code is split into two) you can still enjoy the
benefits of the default list. You can do that with something like
this:
retrycodes = urlgrabber.grabber.URLGrabberOptions().retrycodes
if 12 not in retrycodes:
retrycodes.append(12)
checkfunc = None
a function to do additional checks. This defaults to None, which
means no additional checking. The function should simply return
on a successful check. It should raise URLGrabError on an
unsuccessful check. Raising of any other exception will be
considered immediate failure and no retries will occur.
If it raises URLGrabError, the error code will determine the retry
behavior. Negative error numbers are reserved for use by these
passed in functions, so you can use many negative numbers for
different types of failure. By default, -1 results in a retry,
but this can be customized with retrycodes.
If you simply pass in a function, it will be given exactly one
argument: a CallbackObject instance with the .url attribute
defined and either .filename (for urlgrab) or .data (for urlread).
For urlgrab, .filename is the name of the local file. For
urlread, .data is the actual string data. If you need other
arguments passed to the callback (program state of some sort), you
can do so like this:
checkfunc=(function, ('arg1', 2), {'kwarg': 3})
if the downloaded file has filename /tmp/stuff, then this will
result in this call (for urlgrab):
function(obj, 'arg1', 2, kwarg=3)
# obj.filename = '/tmp/stuff'
# obj.url = 'http://foo.com/stuff'
NOTE: both the "args" tuple and "kwargs" dict must be present if
you use this syntax, but either (or both) can be empty.
failure_callback = None
The callback that gets called during retries when an attempt to
fetch a file fails. The syntax for specifying the callback is
identical to checkfunc, except for the attributes defined in the
CallbackObject instance. The attributes for failure_callback are:
exception = the raised exception
url = the url we're trying to fetch
tries = the number of tries so far (including this one)
retry = the value of the retry option
The callback is present primarily to inform the calling program of
the failure, but if it raises an exception (including the one it's
passed) that exception will NOT be caught and will therefore cause
future retries to be aborted.
The callback is called for EVERY failure, including the last one.
On the last try, the callback can raise an alternate exception,
but it cannot (without severe trickiness) prevent the exception
from being raised.
interrupt_callback = None
This callback is called if KeyboardInterrupt is received at any
point in the transfer. Basically, this callback can have three
impacts on the fetch process based on the way it exits:
1) raise no exception: the current fetch will be aborted, but
any further retries will still take place
2) raise a URLGrabError: if you're using a MirrorGroup, then
this will prompt a failover to the next mirror according to
the behavior of the MirrorGroup subclass. It is recommended
that you raise URLGrabError with code 15, 'user abort'. If
you are NOT using a MirrorGroup subclass, then this is the
same as (3).
3) raise some other exception (such as KeyboardInterrupt), which
will not be caught at either the grabber or mirror levels.
That is, it will be raised up all the way to the caller.
This callback is very similar to failure_callback. They are
passed the same arguments, so you could use the same function for
both.
urlparser = URLParser()
The URLParser class handles pre-processing of URLs, including
auth-handling for user/pass encoded in http urls, file handing
(that is, filenames not sent as a URL), and URL quoting. If you
want to override any of this behavior, you can pass in a
replacement instance. See also the 'quote' option.
quote = None
Whether or not to quote the path portion of a url.
quote = 1 -> quote the URLs (they're not quoted yet)
quote = 0 -> do not quote them (they're already quoted)
quote = None -> guess what to do
This option only affects proper urls like 'file:///etc/passwd'; it
does not affect 'raw' filenames like '/etc/passwd'. The latter
will always be quoted as they are converted to URLs. Also, only
the path part of a url is quoted. If you need more fine-grained
control, you should probably subclass URLParser and pass it in via
the 'urlparser' option.
BANDWIDTH THROTTLING
  urlgrabber supports throttling via two values: throttle and
  bandwidth.  Between the two, you can either specify an absolute
  throttle threshold or specify a threshold as a fraction of maximum
  available bandwidth.
throttle is a number - if it's an int, it's the bytes/second
throttle limit. If it's a float, it is first multiplied by
bandwidth. If throttle == 0, throttling is disabled. If None, the
module-level default (which can be set with set_throttle) is used.
bandwidth is the nominal max bandwidth in bytes/second. If throttle
is a float and bandwidth == 0, throttling is disabled. If None, the
module-level default (which can be set with set_bandwidth) is used.
THROTTLING EXAMPLES:
  Let's say you have a 100 Mbps connection.  This is (about) 10^8 bits
per second, or 12,500,000 Bytes per second. You have a number of
throttling options:
*) set_bandwidth(12500000); set_throttle(0.5) # throttle is a float
This will limit urlgrab to use half of your available bandwidth.
*) set_throttle(6250000) # throttle is an int
This will also limit urlgrab to use half of your available
bandwidth, regardless of what bandwidth is set to.
  *) set_bandwidth(6250000); set_throttle(1.0) # float
     Use half your bandwidth
  *) set_bandwidth(6250000); set_throttle(2.0) # float
     Use up to 12,500,000 Bytes per second (your nominal max bandwidth)
  *) set_bandwidth(6250000); set_throttle(0) # throttle = 0
     Disable throttling - this is more efficient than a very large
     throttle setting.
  *) set_bandwidth(0); set_throttle(1.0) # throttle is float, bandwidth = 0
     Disable throttling - this is the default when the module is loaded.
SUGGESTED AUTHOR IMPLEMENTATION (THROTTLING)
While this is flexible, it's not extremely obvious to the user. I
suggest you implement a float throttle as a percent to make the
distinction between absolute and relative throttling very explicit.
Also, you may want to convert the units to something more convenient
than bytes/second, such as kbps or kB/s, etc.
"""
# $Id: grabber.py,v 1.48 2006/09/22 00:58:05 mstenner Exp $
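# Reget usage sketch (illustrative only; matches the 'reget' documentation
# above -- the URL and paths are placeholders):
#
#   from urlgrabber.grabber import urlgrab
#   urlgrab('http://host/big.iso', '/tmp/big.iso', reget='simple')
#   # a partially-downloaded /tmp/big.iso resumes from its current size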
import os
import os.path
import sys
import urlparse
import rfc822
import time
import string
import urllib
import urllib2
from stat import * # S_* and ST_*
########################################################################
# MODULE INITIALIZATION
########################################################################
try:
exec('from ' + (__name__.split('.'))[0] + ' import __version__')
except:
__version__ = '???'
import sslfactory
auth_handler = urllib2.HTTPBasicAuthHandler( \
urllib2.HTTPPasswordMgrWithDefaultRealm())
try:
from i18n import _
except ImportError, msg:
def _(st): return st
try:
from httplib import HTTPException
except ImportError, msg:
HTTPException = None
try:
# This is a convenient way to make keepalive optional.
# Just rename the module so it can't be imported.
import keepalive
from keepalive import HTTPHandler, HTTPSHandler
have_keepalive = True
except ImportError, msg:
have_keepalive = False
try:
# add in range support conditionally too
import byterange
from byterange import HTTPRangeHandler, HTTPSRangeHandler, \
FileRangeHandler, FTPRangeHandler, range_tuple_normalize, \
range_tuple_to_header, RangeError
except ImportError, msg:
range_handlers = ()
RangeError = None
have_range = 0
else:
range_handlers = (HTTPRangeHandler(), HTTPSRangeHandler(),
FileRangeHandler(), FTPRangeHandler())
have_range = 1
# check whether socket timeout support is available (Python >= 2.3)
import socket
try:
TimeoutError = socket.timeout
have_socket_timeout = True
except AttributeError:
TimeoutError = None
have_socket_timeout = False
########################################################################
# functions for debugging output. These functions are here because they
# are also part of the module initialization.
DEBUG = None
def set_logger(DBOBJ):
"""Set the DEBUG object. This is called by _init_default_logger when
the environment variable URLGRABBER_DEBUG is set, but can also be
called by a calling program. Basically, if the calling program uses
the logging module and would like to incorporate urlgrabber logging,
then it can do so this way. It's probably not necessary as most
internal logging is only for debugging purposes.
The passed-in object should be a logging.Logger instance. It will
be pushed into the keepalive and byterange modules if they're
being used. The mirror module pulls this object in on import, so
you will need to manually push into it. In fact, you may find it
tidier to simply push your logging object (or objects) into each
of these modules independently.
"""
global DEBUG
DEBUG = DBOBJ
if have_keepalive and keepalive.DEBUG is None:
keepalive.DEBUG = DBOBJ
if have_range and byterange.DEBUG is None:
byterange.DEBUG = DBOBJ
if sslfactory.DEBUG is None:
sslfactory.DEBUG = DBOBJ
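# Example (a sketch, not in the original source): a calling program can
# route urlgrabber's debug output through the standard logging module:
#
#   import logging
#   log = logging.getLogger('urlgrabber')
#   log.addHandler(logging.StreamHandler())
#   log.setLevel(logging.DEBUG)
#   set_logger(log)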
def _init_default_logger():
'''Examines the environment variable URLGRABBER_DEBUG and creates
a logging object (logging.logger) based on the contents. It takes
the form
URLGRABBER_DEBUG=level,filename
where "level" can be either an integer or a log level from the
logging module (DEBUG, INFO, etc). If the integer is zero or
less, logging will be disabled. Filename is the filename where
logs will be sent. If it is "-", then stdout will be used. If
the filename is empty or missing, stderr will be used. If the
variable cannot be processed or the logging module cannot be
imported (python < 2.3) then logging will be disabled. Here are
some examples:
URLGRABBER_DEBUG=1,debug.txt # log everything to debug.txt
URLGRABBER_DEBUG=WARNING,- # log warning and higher to stdout
URLGRABBER_DEBUG=INFO # log info and higher to stderr
    This function is called during module initialization.  It is not
intended to be called from outside. The only reason it is a
function at all is to keep the module-level namespace tidy and to
collect the code into a nice block.'''
try:
dbinfo = os.environ['URLGRABBER_DEBUG'].split(',')
import logging
level = logging._levelNames.get(dbinfo[0], int(dbinfo[0]))
if level < 1: raise ValueError()
formatter = logging.Formatter('%(asctime)s %(message)s')
if len(dbinfo) > 1: filename = dbinfo[1]
else: filename = ''
if filename == '': handler = logging.StreamHandler(sys.stderr)
elif filename == '-': handler = logging.StreamHandler(sys.stdout)
else: handler = logging.FileHandler(filename)
handler.setFormatter(formatter)
DBOBJ = logging.getLogger('urlgrabber')
DBOBJ.addHandler(handler)
DBOBJ.setLevel(level)
except (KeyError, ImportError, ValueError):
DBOBJ = None
set_logger(DBOBJ)
_init_default_logger()
########################################################################
# END MODULE INITIALIZATION
########################################################################
class URLGrabError(IOError):
"""
URLGrabError error codes:
URLGrabber error codes (0 -- 255)
0 - everything looks good (you should never see this)
1 - malformed url
2 - local file doesn't exist
3 - request for non-file local file (dir, etc)
4 - IOError on fetch
5 - OSError on fetch
6 - no content length header when we expected one
7 - HTTPException
8 - Exceeded read limit (for urlread)
9 - Requested byte range not satisfiable.
10 - Byte range requested, but range support unavailable
11 - Illegal reget mode
12 - Socket timeout
13 - malformed proxy url
14 - HTTPError (includes .code and .exception attributes)
15 - user abort
MirrorGroup error codes (256 -- 511)
256 - No more mirrors left to try
Custom (non-builtin) classes derived from MirrorGroup (512 -- 767)
[ this range reserved for application-specific error codes ]
Retry codes (< 0)
-1 - retry the download, unknown reason
Note: to test which group a code is in, you can simply do integer
division by 256: e.errno / 256
Negative codes are reserved for use by functions passed in to
retrygrab with checkfunc. The value -1 is built in as a generic
retry code and is already included in the retrycodes list.
Therefore, you can create a custom check function that simply
returns -1 and the fetch will be re-tried. For more customized
    retries, you can use other negative numbers and include them in
    retrycodes.  This is nice for outputting useful messages about
what failed.
You can use these error codes like so:
try: urlgrab(url)
except URLGrabError, e:
if e.errno == 3: ...
# or
print e.strerror
# or simply
print e #### print '[Errno %i] %s' % (e.errno, e.strerror)
"""
pass
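# Illustration only (not in the original): dispatching on the error group
# with integer division by 256, as the docstring above describes:
#
#   try:
#       urlgrab('http://example.com/file')
#   except URLGrabError, e:
#       group = e.errno / 256
#       if group == 0:   print 'grabber error:', e.strerror
#       elif group == 1: print 'mirror error:', e.strerror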
class CallbackObject:
"""Container for returned callback data.
This is currently a dummy class into which urlgrabber can stuff
information for passing to callbacks. This way, the prototype for
all callbacks is the same, regardless of the data that will be
passed back. Any function that accepts a callback function as an
argument SHOULD document what it will define in this object.
It is possible that this class will have some greater
functionality in the future.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def urlgrab(url, filename=None, **kwargs):
"""grab the file at <url> and make a local copy at <filename>
If filename is none, the basename of the url is used.
urlgrab returns the filename of the local file, which may be different
from the passed-in filename if the copy_local kwarg == 0.
See module documentation for a description of possible kwargs.
"""
return default_grabber.urlgrab(url, filename, **kwargs)
def urlopen(url, **kwargs):
"""open the url and return a file object
If a progress object or throttle specifications exist, then
a special file object will be returned that supports them.
The file object can be treated like any other file object.
See module documentation for a description of possible kwargs.
"""
return default_grabber.urlopen(url, **kwargs)
def urlread(url, limit=None, **kwargs):
"""read the url into a string, up to 'limit' bytes
If the limit is exceeded, an exception will be thrown. Note that urlread
is NOT intended to be used as a way of saying "I want the first N bytes"
but rather 'read the whole file into memory, but don't use too much'
See module documentation for a description of possible kwargs.
"""
return default_grabber.urlread(url, limit, **kwargs)
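# Minimal usage sketch for the three module-level convenience functions
# (illustrative; assumes network access and uses a placeholder URL):
#
#   local = urlgrab('http://www.python.org/index.html', '/tmp/index.html')
#   fo = urlopen('http://www.python.org/'); data = fo.read(); fo.close()
#   text = urlread('http://www.python.org/', limit=1024*1024)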
class URLParser:
"""Process the URLs before passing them to urllib2.
This class does several things:
* add any prefix
* translate a "raw" file to a proper file: url
* handle any http or https auth that's encoded within the url
* quote the url
Only the "parse" method is called directly, and it calls sub-methods.
An instance of this class is held in the options object, which
means that it's easy to change the behavior by sub-classing and
passing the replacement in. It need only have a method like:
url, parts = urlparser.parse(url, opts)
"""
def parse(self, url, opts):
"""parse the url and return the (modified) url and its parts
Note: a raw file WILL be quoted when it's converted to a URL.
However, other urls (ones which come with a proper scheme) may
or may not be quoted according to opts.quote
opts.quote = 1 --> quote it
opts.quote = 0 --> do not quote it
opts.quote = None --> guess
"""
quote = opts.quote
if opts.prefix:
url = self.add_prefix(url, opts.prefix)
parts = urlparse.urlparse(url)
(scheme, host, path, parm, query, frag) = parts
if not scheme or (len(scheme) == 1 and scheme in string.letters):
# if a scheme isn't specified, we guess that it's "file:"
if url[0] not in '/\\': url = os.path.abspath(url)
url = 'file:' + urllib.pathname2url(url)
parts = urlparse.urlparse(url)
quote = 0 # pathname2url quotes, so we won't do it again
if scheme in ['http', 'https']:
parts = self.process_http(parts)
if quote is None:
quote = self.guess_should_quote(parts)
if quote:
parts = self.quote(parts)
url = urlparse.urlunparse(parts)
return url, parts
def add_prefix(self, url, prefix):
if prefix[-1] == '/' or url[0] == '/':
url = prefix + url
else:
url = prefix + '/' + url
return url
def process_http(self, parts):
(scheme, host, path, parm, query, frag) = parts
if '@' in host and auth_handler:
            try:
                user_pass, host = host.split('@', 1)
                if ':' in user_pass:
                    user, password = user_pass.split(':', 1)
                else:
                    # no password was given; default to an empty one so
                    # user/password are never unbound below
                    user, password = user_pass, ''
            except ValueError, e:
                # the full url is not in scope here, so report the host part
                raise URLGrabError(1, _('Bad URL: %s') % (host, ))
if DEBUG: DEBUG.info('adding HTTP auth: %s, XXXXXXXX', user)
auth_handler.add_password(None, host, user, password)
return (scheme, host, path, parm, query, frag)
def quote(self, parts):
"""quote the URL
This method quotes ONLY the path part. If you need to quote
other parts, you should override this and pass in your derived
class. The other alternative is to quote other parts before
passing into urlgrabber.
"""
(scheme, host, path, parm, query, frag) = parts
path = urllib.quote(path)
return (scheme, host, path, parm, query, frag)
hexvals = '0123456789ABCDEF'
def guess_should_quote(self, parts):
"""
Guess whether we should quote a path. This amounts to
guessing whether it's already quoted.
find ' ' -> 1
find '%' -> 1
find '%XX' -> 0
else -> 1
"""
(scheme, host, path, parm, query, frag) = parts
if ' ' in path:
return 1
ind = string.find(path, '%')
if ind > -1:
while ind > -1:
if len(path) < ind+3:
return 1
code = path[ind+1:ind+3].upper()
if code[0] not in self.hexvals or \
code[1] not in self.hexvals:
return 1
ind = string.find(path, '%', ind+1)
return 0
return 1
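# For illustration (not in the original): how parse() treats raw filenames
# versus already-schemed URLs, given default options:
#
#   p = URLParser()
#   url, parts = p.parse('/etc/hosts', URLGrabberOptions())
#   # -> 'file:///etc/hosts' (raw paths are converted and quoted)
#   url, parts = p.parse('http://host/a b', URLGrabberOptions())
#   # -> path quoted to 'a%20b' because guess_should_quote() sees the space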
class URLGrabberOptions:
"""Class to ease kwargs handling."""
def __init__(self, delegate=None, **kwargs):
"""Initialize URLGrabberOptions object.
Set default values for all options and then update options specified
in kwargs.
"""
self.delegate = delegate
if delegate is None:
self._set_defaults()
self._set_attributes(**kwargs)
def __getattr__(self, name):
if self.delegate and hasattr(self.delegate, name):
return getattr(self.delegate, name)
raise AttributeError, name
def raw_throttle(self):
"""Calculate raw throttle value from throttle and bandwidth
values.
"""
if self.throttle <= 0:
return 0
elif type(self.throttle) == type(0):
return float(self.throttle)
else: # throttle is a float
return self.bandwidth * self.throttle
def derive(self, **kwargs):
"""Create a derived URLGrabberOptions instance.
This method creates a new instance and overrides the
options specified in kwargs.
"""
return URLGrabberOptions(delegate=self, **kwargs)
def _set_attributes(self, **kwargs):
"""Update object attributes with those provided in kwargs."""
self.__dict__.update(kwargs)
if have_range and kwargs.has_key('range'):
# normalize the supplied range value
self.range = range_tuple_normalize(self.range)
if not self.reget in [None, 'simple', 'check_timestamp']:
raise URLGrabError(11, _('Illegal reget mode: %s') \
% (self.reget, ))
def _set_defaults(self):
"""Set all options to their default values.
When adding new options, make sure a default is
provided here.
"""
self.progress_obj = None
self.throttle = 1.0
self.bandwidth = 0
self.retry = None
self.retrycodes = [-1,2,4,5,6,7]
self.checkfunc = None
self.copy_local = 0
self.close_connection = 0
self.range = None
self.user_agent = 'urlgrabber/%s' % __version__
self.keepalive = 1
self.proxies = None
self.reget = None
self.failure_callback = None
self.interrupt_callback = None
self.prefix = None
self.opener = None
self.cache_openers = True
self.timeout = None
self.text = None
self.http_headers = None
self.ftp_headers = None
self.data = None
self.urlparser = URLParser()
self.quote = None
self.ssl_ca_cert = None
self.ssl_context = None
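# Sketch (not in the original): derived options delegate attribute lookups
# to their parent, so per-call kwargs shadow defaults without copying them:
#
#   base = URLGrabberOptions(retry=3)
#   per_call = base.derive(timeout=30.0)
#   # per_call.timeout == 30.0, while per_call.retry falls through to 3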
class URLGrabber:
"""Provides easy opening of URLs with a variety of options.
All options are specified as kwargs. Options may be specified when
the class is created and may be overridden on a per request basis.
New objects inherit default values from default_grabber.
"""
def __init__(self, **kwargs):
self.opts = URLGrabberOptions(**kwargs)
def _retry(self, opts, func, *args):
tries = 0
while 1:
# there are only two ways out of this loop. The second has
# several "sub-ways"
# 1) via the return in the "try" block
# 2) by some exception being raised
            # a) an exception is raised that we don't "except"
# b) a callback raises ANY exception
# c) we're not retry-ing or have run out of retries
# d) the URLGrabError code is not in retrycodes
# beware of infinite loops :)
tries = tries + 1
exception = None
retrycode = None
callback = None
if DEBUG: DEBUG.info('attempt %i/%s: %s',
tries, opts.retry, args[0])
try:
r = apply(func, (opts,) + args, {})
if DEBUG: DEBUG.info('success')
return r
except URLGrabError, e:
exception = e
callback = opts.failure_callback
retrycode = e.errno
except KeyboardInterrupt, e:
exception = e
callback = opts.interrupt_callback
if DEBUG: DEBUG.info('exception: %s', exception)
if callback:
if DEBUG: DEBUG.info('calling callback: %s', callback)
cb_func, cb_args, cb_kwargs = self._make_callback(callback)
obj = CallbackObject(exception=exception, url=args[0],
tries=tries, retry=opts.retry)
cb_func(obj, *cb_args, **cb_kwargs)
if (opts.retry is None) or (tries == opts.retry):
if DEBUG: DEBUG.info('retries exceeded, re-raising')
raise
if (retrycode is not None) and (retrycode not in opts.retrycodes):
if DEBUG: DEBUG.info('retrycode (%i) not in list %s, re-raising',
retrycode, opts.retrycodes)
raise
def urlopen(self, url, **kwargs):
"""open the url and return a file object
        If a progress object or throttle value was specified when this
object was created, then a special file object will be
returned that supports them. The file object can be treated
like any other file object.
"""
opts = self.opts.derive(**kwargs)
(url,parts) = opts.urlparser.parse(url, opts)
def retryfunc(opts, url):
return URLGrabberFileObject(url, filename=None, opts=opts)
return self._retry(opts, retryfunc, url)
def urlgrab(self, url, filename=None, **kwargs):
"""grab the file at <url> and make a local copy at <filename>
If filename is none, the basename of the url is used.
urlgrab returns the filename of the local file, which may be
different from the passed-in filename if copy_local == 0.
"""
opts = self.opts.derive(**kwargs)
(url,parts) = opts.urlparser.parse(url, opts)
(scheme, host, path, parm, query, frag) = parts
if filename is None:
filename = os.path.basename( urllib.unquote(path) )
if scheme == 'file' and not opts.copy_local:
# just return the name of the local file - don't make a
# copy currently
path = urllib.url2pathname(path)
if host:
path = os.path.normpath('//' + host + path)
if not os.path.exists(path):
raise URLGrabError(2,
_('Local file does not exist: %s') % (path, ))
elif not os.path.isfile(path):
raise URLGrabError(3,
_('Not a normal file: %s') % (path, ))
elif not opts.range:
return path
def retryfunc(opts, url, filename):
fo = URLGrabberFileObject(url, filename, opts)
try:
fo._do_grab()
if not opts.checkfunc is None:
cb_func, cb_args, cb_kwargs = \
self._make_callback(opts.checkfunc)
obj = CallbackObject()
obj.filename = filename
obj.url = url
apply(cb_func, (obj, )+cb_args, cb_kwargs)
finally:
fo.close()
return filename
return self._retry(opts, retryfunc, url, filename)
def urlread(self, url, limit=None, **kwargs):
"""read the url into a string, up to 'limit' bytes
If the limit is exceeded, an exception will be thrown. Note
that urlread is NOT intended to be used as a way of saying
"I want the first N bytes" but rather 'read the whole file
into memory, but don't use too much'
"""
opts = self.opts.derive(**kwargs)
(url,parts) = opts.urlparser.parse(url, opts)
if limit is not None:
limit = limit + 1
def retryfunc(opts, url, limit):
fo = URLGrabberFileObject(url, filename=None, opts=opts)
s = ''
try:
# this is an unfortunate thing. Some file-like objects
# have a default "limit" of None, while the built-in (real)
# file objects have -1. They each break the other, so for
# now, we just force the default if necessary.
if limit is None: s = fo.read()
else: s = fo.read(limit)
if not opts.checkfunc is None:
cb_func, cb_args, cb_kwargs = \
self._make_callback(opts.checkfunc)
obj = CallbackObject()
obj.data = s
obj.url = url
apply(cb_func, (obj, )+cb_args, cb_kwargs)
finally:
fo.close()
return s
s = self._retry(opts, retryfunc, url, limit)
        # limit was incremented by one above, so reading more than
        # (limit - 1) bytes means the caller's limit was exceeded
        if limit and len(s) > limit - 1:
            raise URLGrabError(8,
                               _('Exceeded limit (%i): %s') % (limit - 1, url))
return s
def _make_callback(self, callback_obj):
if callable(callback_obj):
return callback_obj, (), {}
else:
return callback_obj
# create the default URLGrabber used by urlXXX functions.
# NOTE: actual defaults are set in URLGrabberOptions
default_grabber = URLGrabber()
class URLGrabberFileObject:
"""This is a file-object wrapper that supports progress objects
and throttling.
    This exists to solve the following problem: let's say you want to
    drop-in replace a normal open with urlopen.  You want to use a
    progress meter and/or throttling, but how do you do that without
    rewriting your code?  Answer: urlopen will return a wrapped file
    object that does the progress meter and/or throttling internally.
"""
def __init__(self, url, filename, opts):
self.url = url
self.filename = filename
self.opts = opts
self.fo = None
self._rbuf = ''
self._rbufsize = 1024*8
self._ttime = time.time()
self._tsize = 0
self._amount_read = 0
self._opener = None
self._do_open()
def __getattr__(self, name):
"""This effectively allows us to wrap at the instance level.
Any attribute not found in _this_ object will be searched for
in self.fo. This includes methods."""
if hasattr(self.fo, name):
return getattr(self.fo, name)
raise AttributeError, name
def _get_opener(self):
"""Build a urllib2 OpenerDirector based on request options."""
if self.opts.opener:
return self.opts.opener
elif self._opener is None:
handlers = []
need_keepalive_handler = (have_keepalive and self.opts.keepalive)
need_range_handler = (range_handlers and \
(self.opts.range or self.opts.reget))
# if you specify a ProxyHandler when creating the opener
# it _must_ come before all other handlers in the list or urllib2
# chokes.
if self.opts.proxies:
handlers.append( CachedProxyHandler(self.opts.proxies) )
# -------------------------------------------------------
# OK, these next few lines are a serious kludge to get
# around what I think is a bug in python 2.2's
# urllib2. The basic idea is that default handlers
# get applied first. If you override one (like a
# proxy handler), then the default gets pulled, but
# the replacement goes on the end. In the case of
# proxies, this means the normal handler picks it up
# first and the proxy isn't used. Now, this probably
# only happened with ftp or non-keepalive http, so not
# many folks saw it. The simple approach to fixing it
# is just to make sure you override the other
# conflicting defaults as well. I would LOVE to see
# these go way or be dealt with more elegantly. The
# problem isn't there after 2.2. -MDS 2005/02/24
if not need_keepalive_handler:
handlers.append( urllib2.HTTPHandler() )
if not need_range_handler:
handlers.append( urllib2.FTPHandler() )
# -------------------------------------------------------
ssl_factory = sslfactory.get_factory(self.opts.ssl_ca_cert,
self.opts.ssl_context)
if need_keepalive_handler:
handlers.append(HTTPHandler())
handlers.append(HTTPSHandler(ssl_factory))
if need_range_handler:
handlers.extend( range_handlers )
handlers.append( auth_handler )
if self.opts.cache_openers:
self._opener = CachedOpenerDirector(ssl_factory, *handlers)
else:
self._opener = ssl_factory.create_opener(*handlers)
# OK, I don't like to do this, but otherwise, we end up with
# TWO user-agent headers.
self._opener.addheaders = []
return self._opener
def _do_open(self):
opener = self._get_opener()
req = urllib2.Request(self.url, self.opts.data) # build request object
self._add_headers(req) # add misc headers that we need
self._build_range(req) # take care of reget and byterange stuff
fo, hdr = self._make_request(req, opener)
if self.reget_time and self.opts.reget == 'check_timestamp':
# do this if we have a local file with known timestamp AND
# we're in check_timestamp reget mode.
fetch_again = 0
try:
modified_tuple = hdr.getdate_tz('last-modified')
modified_stamp = rfc822.mktime_tz(modified_tuple)
if modified_stamp > self.reget_time: fetch_again = 1
except (TypeError,):
fetch_again = 1
if fetch_again:
# the server version is newer than the (incomplete) local
# version, so we should abandon the version we're getting
# and fetch the whole thing again.
fo.close()
self.opts.reget = None
del req.headers['Range']
self._build_range(req)
fo, hdr = self._make_request(req, opener)
(scheme, host, path, parm, query, frag) = urlparse.urlparse(self.url)
path = urllib.unquote(path)
if not (self.opts.progress_obj or self.opts.raw_throttle() \
or self.opts.timeout):
# if we're not using the progress_obj, throttling, or timeout
# we can get a performance boost by going directly to
# the underlying fileobject for reads.
self.read = fo.read
if hasattr(fo, 'readline'):
self.readline = fo.readline
elif self.opts.progress_obj:
try:
length = int(hdr['Content-Length'])
length = length + self._amount_read # Account for regets
except (KeyError, ValueError, TypeError):
length = None
self.opts.progress_obj.start(str(self.filename),
urllib.unquote(self.url),
os.path.basename(path),
length, text=self.opts.text)
self.opts.progress_obj.update(0)
(self.fo, self.hdr) = (fo, hdr)
def _add_headers(self, req):
if self.opts.user_agent:
req.add_header('User-agent', self.opts.user_agent)
try: req_type = req.get_type()
except ValueError: req_type = None
if self.opts.http_headers and req_type in ('http', 'https'):
for h, v in self.opts.http_headers:
req.add_header(h, v)
if self.opts.ftp_headers and req_type == 'ftp':
for h, v in self.opts.ftp_headers:
req.add_header(h, v)
def _build_range(self, req):
self.reget_time = None
self.append = 0
reget_length = 0
rt = None
if have_range and self.opts.reget and type(self.filename) == type(''):
# we have reget turned on and we're dumping to a file
try:
s = os.stat(self.filename)
except OSError:
pass
else:
self.reget_time = s[ST_MTIME]
reget_length = s[ST_SIZE]
# Set initial length when regetting
self._amount_read = reget_length
rt = reget_length, ''
self.append = 1
if self.opts.range:
if not have_range:
raise URLGrabError(10, _('Byte range requested but range '\
'support unavailable'))
rt = self.opts.range
if rt[0]: rt = (rt[0] + reget_length, rt[1])
if rt:
header = range_tuple_to_header(rt)
if header: req.add_header('Range', header)
def _make_request(self, req, opener):
try:
if have_socket_timeout and self.opts.timeout:
old_to = socket.getdefaulttimeout()
socket.setdefaulttimeout(self.opts.timeout)
try:
fo = opener.open(req)
finally:
socket.setdefaulttimeout(old_to)
else:
fo = opener.open(req)
hdr = fo.info()
except ValueError, e:
raise URLGrabError(1, _('Bad URL: %s') % (e, ))
except RangeError, e:
raise URLGrabError(9, str(e))
except urllib2.HTTPError, e:
new_e = URLGrabError(14, str(e))
new_e.code = e.code
new_e.exception = e
raise new_e
except IOError, e:
if hasattr(e, 'reason') and have_socket_timeout and \
isinstance(e.reason, TimeoutError):
raise URLGrabError(12, _('Timeout: %s') % (e, ))
else:
raise URLGrabError(4, _('IOError: %s') % (e, ))
except OSError, e:
raise URLGrabError(5, _('OSError: %s') % (e, ))
except HTTPException, e:
raise URLGrabError(7, _('HTTP Exception (%s): %s') % \
(e.__class__.__name__, e))
else:
return (fo, hdr)
def _do_grab(self):
"""dump the file to self.filename."""
if self.append: new_fo = open(self.filename, 'ab')
else: new_fo = open(self.filename, 'wb')
bs = 1024*8
size = 0
block = self.read(bs)
size = size + len(block)
while block:
new_fo.write(block)
block = self.read(bs)
size = size + len(block)
new_fo.close()
try:
modified_tuple = self.hdr.getdate_tz('last-modified')
modified_stamp = rfc822.mktime_tz(modified_tuple)
os.utime(self.filename, (modified_stamp, modified_stamp))
except (TypeError,), e: pass
return size
def _fill_buffer(self, amt=None):
"""fill the buffer to contain at least 'amt' bytes by reading
from the underlying file object. If amt is None, then it will
read until it gets nothing more. It updates the progress meter
and throttles after every self._rbufsize bytes."""
# the _rbuf test is only in this first 'if' for speed. It's not
# logically necessary
if self._rbuf and not amt is None:
L = len(self._rbuf)
if amt > L:
amt = amt - L
else:
return
# if we've made it here, then we don't have enough in the buffer
# and we need to read more.
buf = [self._rbuf]
bufsize = len(self._rbuf)
while amt is None or amt:
# first, delay if necessary for throttling reasons
if self.opts.raw_throttle():
diff = self._tsize/self.opts.raw_throttle() - \
(time.time() - self._ttime)
if diff > 0: time.sleep(diff)
self._ttime = time.time()
# now read some data, up to self._rbufsize
if amt is None: readamount = self._rbufsize
else: readamount = min(amt, self._rbufsize)
try:
new = self.fo.read(readamount)
except socket.error, e:
raise URLGrabError(4, _('Socket Error: %s') % (e, ))
except TimeoutError, e:
raise URLGrabError(12, _('Timeout: %s') % (e, ))
except IOError, e:
raise URLGrabError(4, _('IOError: %s') %(e,))
newsize = len(new)
if not newsize: break # no more to read
if amt: amt = amt - newsize
buf.append(new)
bufsize = bufsize + newsize
self._tsize = newsize
self._amount_read = self._amount_read + newsize
if self.opts.progress_obj:
self.opts.progress_obj.update(self._amount_read)
self._rbuf = string.join(buf, '')
return
def read(self, amt=None):
self._fill_buffer(amt)
if amt is None:
s, self._rbuf = self._rbuf, ''
else:
s, self._rbuf = self._rbuf[:amt], self._rbuf[amt:]
return s
def readline(self, limit=-1):
i = string.find(self._rbuf, '\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
L = len(self._rbuf)
self._fill_buffer(L + self._rbufsize)
if not len(self._rbuf) > L: break
i = string.find(self._rbuf, '\n', L)
if i < 0: i = len(self._rbuf)
else: i = i+1
        # a caller-supplied limit only truncates; it must not push the
        # cut past an already-found newline
        if 0 <= limit < i: i = limit
s, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return s
def close(self):
if self.opts.progress_obj:
self.opts.progress_obj.end(self._amount_read)
self.fo.close()
if self.opts.close_connection:
try: self.fo.close_connection()
except: pass
_handler_cache = []
def CachedOpenerDirector(ssl_factory = None, *handlers):
for (cached_handlers, opener) in _handler_cache:
if cached_handlers == handlers:
for handler in opener.handlers:
handler.add_parent(opener)
return opener
if not ssl_factory:
ssl_factory = sslfactory.get_factory()
opener = ssl_factory.create_opener(*handlers)
_handler_cache.append( (handlers, opener) )
return opener
_proxy_cache = []
def CachedProxyHandler(proxies):
for (pdict, handler) in _proxy_cache:
if pdict == proxies:
if DEBUG: DEBUG.debug('re-using proxy settings: %s', proxies)
break
else:
for k, v in proxies.items():
utype, url = urllib.splittype(v)
host, other = urllib.splithost(url)
if (utype is None) or (host is None):
raise URLGrabError(13, _('Bad proxy URL: %s') % v)
if DEBUG: DEBUG.info('creating new proxy handler: %s', proxies)
handler = urllib2.ProxyHandler(proxies)
_proxy_cache.append( (proxies, handler) )
return handler
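# Example (illustrative only): two calls with equal proxy dicts share one
# urllib2.ProxyHandler instance via the cache above:
#
#   h1 = CachedProxyHandler({'http': 'http://proxy.example:3128'})
#   h2 = CachedProxyHandler({'http': 'http://proxy.example:3128'})
#   assert h1 is h2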
#####################################################################
# DEPRECATED FUNCTIONS
def set_throttle(new_throttle):
"""Deprecated. Use: default_grabber.throttle = new_throttle"""
default_grabber.throttle = new_throttle
def set_bandwidth(new_bandwidth):
"""Deprecated. Use: default_grabber.bandwidth = new_bandwidth"""
default_grabber.bandwidth = new_bandwidth
def set_progress_obj(new_progress_obj):
"""Deprecated. Use: default_grabber.progress_obj = new_progress_obj"""
default_grabber.progress_obj = new_progress_obj
def set_user_agent(new_user_agent):
"""Deprecated. Use: default_grabber.user_agent = new_user_agent"""
default_grabber.user_agent = new_user_agent
def retrygrab(url, filename=None, copy_local=0, close_connection=0,
progress_obj=None, throttle=None, bandwidth=None,
numtries=3, retrycodes=[-1,2,4,5,6,7], checkfunc=None):
"""Deprecated. Use: urlgrab() with the retry arg instead"""
kwargs = {'copy_local' : copy_local,
'close_connection' : close_connection,
'progress_obj' : progress_obj,
'throttle' : throttle,
'bandwidth' : bandwidth,
'retry' : numtries,
'retrycodes' : retrycodes,
'checkfunc' : checkfunc
}
return urlgrab(url, filename, **kwargs)
#####################################################################
# TESTING
def _main_test():
import sys
try: url, filename = sys.argv[1:3]
except ValueError:
print 'usage:', sys.argv[0], \
'<url> <filename> [copy_local=0|1] [close_connection=0|1]'
sys.exit()
kwargs = {}
for a in sys.argv[3:]:
k, v = string.split(a, '=', 1)
kwargs[k] = int(v)
set_throttle(1.0)
set_bandwidth(32 * 1024)
print "throttle: %s, throttle bandwidth: %s B/s" % (default_grabber.throttle,
default_grabber.bandwidth)
try: from progress import text_progress_meter
except ImportError, e: pass
else: kwargs['progress_obj'] = text_progress_meter()
try: name = apply(urlgrab, (url, filename), kwargs)
except URLGrabError, e: print e
else: print 'LOCAL FILE:', name
def _retry_test():
import sys
try: url, filename = sys.argv[1:3]
except ValueError:
print 'usage:', sys.argv[0], \
'<url> <filename> [copy_local=0|1] [close_connection=0|1]'
sys.exit()
kwargs = {}
for a in sys.argv[3:]:
k, v = string.split(a, '=', 1)
kwargs[k] = int(v)
try: from progress import text_progress_meter
except ImportError, e: pass
else: kwargs['progress_obj'] = text_progress_meter()
def cfunc(filename, hello, there='foo'):
print hello, there
import random
rnum = random.random()
if rnum < .5:
print 'forcing retry'
raise URLGrabError(-1, 'forcing retry')
if rnum < .75:
print 'forcing failure'
raise URLGrabError(-2, 'forcing immediate failure')
print 'success'
return
kwargs['checkfunc'] = (cfunc, ('hello',), {'there':'there'})
try: name = apply(retrygrab, (url, filename), kwargs)
except URLGrabError, e: print e
else: print 'LOCAL FILE:', name
def _file_object_test(filename=None):
import random, cStringIO, sys
if filename is None:
filename = __file__
print 'using file "%s" for comparisons' % filename
fo = open(filename)
s_input = fo.read()
fo.close()
for testfunc in [_test_file_object_smallread,
_test_file_object_readall,
_test_file_object_readline,
_test_file_object_readlines]:
fo_input = cStringIO.StringIO(s_input)
fo_output = cStringIO.StringIO()
wrapper = URLGrabberFileObject(fo_input, None, 0)
print 'testing %-30s ' % testfunc.__name__,
testfunc(wrapper, fo_output)
s_output = fo_output.getvalue()
if s_output == s_input: print 'passed'
else: print 'FAILED'
def _test_file_object_smallread(wrapper, fo_output):
while 1:
s = wrapper.read(23)
fo_output.write(s)
if not s: return
def _test_file_object_readall(wrapper, fo_output):
s = wrapper.read()
fo_output.write(s)
def _test_file_object_readline(wrapper, fo_output):
while 1:
s = wrapper.readline()
fo_output.write(s)
if not s: return
def _test_file_object_readlines(wrapper, fo_output):
li = wrapper.readlines()
fo_output.write(string.join(li, ''))
if __name__ == '__main__':
_main_test()
_retry_test()
_file_object_test('test')
########NEW FILE########
__FILENAME__ = keepalive
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
>>> import urllib2
>>> from keepalive import HTTPHandler
>>> keepalive_handler = HTTPHandler()
>>> opener = urllib2.build_opener(keepalive_handler)
>>> urllib2.install_opener(opener)
>>>
>>> fo = urllib2.urlopen('http://www.python.org')
If a connection to a given host is requested, and all of the existing
connections are still in use, another connection will be opened. If
the handler tries to use an existing connection but it fails in some
way, it will be closed and removed from the pool.
To remove the handler, simply re-run build_opener with no arguments, and
install that opener.
You can explicitly close connections by using the close_connection()
method of the returned file-like object (described below) or you can
use the handler methods:
close_connection(host)
close_all()
open_connections()
NOTE: using the close_connection and close_all methods of the handler
should be done with care when using multiple threads.
* there is nothing that prevents another thread from creating new
connections immediately after connections are closed
* no checks are done to prevent in-use connections from being closed
>>> keepalive_handler.close_all()
EXTRA ATTRIBUTES AND METHODS
Upon a status of 200, the object returned has a few additional
attributes and methods, which should not be used if you want to
remain consistent with the normal urllib2-returned objects:
close_connection() - close the connection to the host
readlines() - you know, readlines()
  status - the return status (ie 404)
  reason - English translation of status (ie 'File not found')
If you want the best of both worlds, use this inside an
AttributeError-catching try:
>>> try: status = fo.status
>>> except AttributeError: status = None
Unfortunately, these are ONLY there if status == 200, so it's not
easy to distinguish between non-200 responses. The reason is that
urllib2 tries to do clever things with error codes 301, 302, 401,
and 407, and it wraps the object upon return.
For python versions earlier than 2.4, you can avoid this fancy error
handling by setting the module-level global HANDLE_ERRORS to zero.
You see, prior to 2.4, it's the HTTP Handler's job to determine what
to handle specially, and what to just pass up. HANDLE_ERRORS == 0
means "pass everything up". In python 2.4, however, this job no
longer belongs to the HTTP Handler and is now done by a NEW handler,
HTTPErrorProcessor. Here's the bottom line:
python version < 2.4
HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
errors
HANDLE_ERRORS == 0 pass everything up, error processing is
left to the calling code
python version >= 2.4
HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
HANDLE_ERRORS == 0 (default) pass everything up, let the
other handlers (specifically,
HTTPErrorProcessor) decide what to do
In practice, setting the variable either way makes little difference
in python 2.4, so for the most consistent behavior across versions,
you probably just want to use the defaults, which will give you
exceptions on errors.
"""
# $Id: keepalive.py,v 1.16 2006/09/22 00:58:05 mstenner Exp $
import urllib2
import httplib
import socket
import thread
DEBUG = None
import sslfactory
import sys
if sys.version_info < (2, 4): HANDLE_ERRORS = 1
else: HANDLE_ERRORS = 0
class ConnectionManager:
"""
The connection manager must be able to:
      * keep track of all existing connections
"""
def __init__(self):
self._lock = thread.allocate_lock()
self._hostmap = {} # map hosts to a list of connections
self._connmap = {} # map connections to host
self._readymap = {} # map connection to ready state
def add(self, host, connection, ready):
self._lock.acquire()
try:
if not self._hostmap.has_key(host): self._hostmap[host] = []
self._hostmap[host].append(connection)
self._connmap[connection] = host
self._readymap[connection] = ready
finally:
self._lock.release()
def remove(self, connection):
self._lock.acquire()
try:
try:
host = self._connmap[connection]
except KeyError:
pass
else:
del self._connmap[connection]
del self._readymap[connection]
self._hostmap[host].remove(connection)
if not self._hostmap[host]: del self._hostmap[host]
finally:
self._lock.release()
def set_ready(self, connection, ready):
try: self._readymap[connection] = ready
except KeyError: pass
def get_ready_conn(self, host):
conn = None
self._lock.acquire()
try:
if self._hostmap.has_key(host):
for c in self._hostmap[host]:
if self._readymap[c]:
self._readymap[c] = 0
conn = c
break
finally:
self._lock.release()
return conn
def get_all(self, host=None):
if host:
return list(self._hostmap.get(host, []))
else:
return dict(self._hostmap)
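# Hedged sketch (added for illustration; not part of the original module):
# exercises the ConnectionManager bookkeeping contract with a plain object
# standing in for a real HTTPConnection.
def _example_connection_manager():
    cm = ConnectionManager()
    conn = object()
    cm.add('www.example.org:80', conn, ready=1)
    assert cm.get_ready_conn('www.example.org:80') is conn
    # get_ready_conn marked it busy; flag it ready again, then drop it
    cm.set_ready(conn, 1)
    cm.remove(conn)
    return cm.get_all()   # {} - no hosts left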
class KeepAliveHandler:
def __init__(self):
self._cm = ConnectionManager()
#### Connection Management
def open_connections(self):
"""return a list of connected hosts and the number of connections
to each. [('foo.com:80', 2), ('bar.org', 1)]"""
return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
def close_connection(self, host):
"""close connection(s) to <host>
host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
no error occurs if there is no connection to that host."""
for h in self._cm.get_all(host):
self._cm.remove(h)
h.close()
def close_all(self):
"""close all open connections"""
for host, conns in self._cm.get_all().items():
for h in conns:
self._cm.remove(h)
h.close()
def _request_closed(self, request, host, connection):
"""tells us that this request is now closed and the the
connection is ready for another request"""
self._cm.set_ready(connection, 1)
def _remove_connection(self, host, connection, close=0):
if close: connection.close()
self._cm.remove(connection)
#### Transaction Execution
def do_open(self, req):
host = req.get_host()
if not host:
raise urllib2.URLError('no host given')
try:
h = self._cm.get_ready_conn(host)
while h:
r = self._reuse_connection(h, req, host)
# if this response is non-None, then it worked and we're
# done. Break out, skipping the else block.
if r: break
# connection is bad - possibly closed by server
# discard it and ask for the next free connection
h.close()
self._cm.remove(h)
h = self._cm.get_ready_conn(host)
else:
# no (working) free connections were found. Create a new one.
h = self._get_connection(host)
if DEBUG: DEBUG.info("creating new connection to %s (%d)",
host, id(h))
self._cm.add(host, h, 0)
self._start_transaction(h, req)
r = h.getresponse()
except (socket.error, httplib.HTTPException), err:
raise urllib2.URLError(err)
# if not a persistent connection, don't try to reuse it
if r.will_close: self._cm.remove(h)
if DEBUG: DEBUG.info("STATUS: %s, %s", r.status, r.reason)
r._handler = self
r._host = host
r._url = req.get_full_url()
r._connection = h
r.code = r.status
r.headers = r.msg
r.msg = r.reason
if r.status == 200 or not HANDLE_ERRORS:
return r
else:
return self.parent.error('http', req, r,
r.status, r.msg, r.headers)
def _reuse_connection(self, h, req, host):
"""start the transaction with a re-used connection
return a response object (r) upon success or None on failure.
        This DOES NOT close or remove bad connections when it returns
        None.  However, if an unexpected exception occurs, it
        will close and remove the connection before re-raising.
"""
try:
self._start_transaction(h, req)
r = h.getresponse()
# note: just because we got something back doesn't mean it
# worked. We'll check the version below, too.
except (socket.error, httplib.HTTPException):
r = None
except:
# adding this block just in case we've missed
# something we will still raise the exception, but
# lets try and close the connection and remove it
# first. We previously got into a nasty loop
# where an exception was uncaught, and so the
# connection stayed open. On the next try, the
# same exception was raised, etc. The tradeoff is
# that it's now possible this call will raise
# a DIFFERENT exception
if DEBUG: DEBUG.error("unexpected exception - closing " + \
"connection to %s (%d)", host, id(h))
self._cm.remove(h)
h.close()
raise
if r is None or r.version == 9:
# httplib falls back to assuming HTTP 0.9 if it gets a
# bad header back. This is most likely to happen if
# the socket has been closed by the server since we
# last used the connection.
if DEBUG: DEBUG.info("failed to re-use connection to %s (%d)",
host, id(h))
r = None
else:
if DEBUG: DEBUG.info("re-using connection to %s (%d)", host, id(h))
return r
def _start_transaction(self, h, req):
try:
if req.has_data():
data = req.get_data()
h.putrequest('POST', req.get_selector())
if not req.headers.has_key('Content-type'):
h.putheader('Content-type',
'application/x-www-form-urlencoded')
if not req.headers.has_key('Content-length'):
h.putheader('Content-length', '%d' % len(data))
else:
h.putrequest('GET', req.get_selector())
except (socket.error, httplib.HTTPException), err:
raise urllib2.URLError(err)
for args in self.parent.addheaders:
h.putheader(*args)
for k, v in req.headers.items():
h.putheader(k, v)
h.endheaders()
if req.has_data():
h.send(data)
def _get_connection(self, host):
        raise NotImplementedError()
class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
def __init__(self):
KeepAliveHandler.__init__(self)
def http_open(self, req):
return self.do_open(req)
def _get_connection(self, host):
return HTTPConnection(host)
class HTTPSHandler(KeepAliveHandler, urllib2.HTTPSHandler):
def __init__(self, ssl_factory=None):
KeepAliveHandler.__init__(self)
if not ssl_factory:
ssl_factory = sslfactory.get_factory()
self._ssl_factory = ssl_factory
def https_open(self, req):
return self.do_open(req)
def _get_connection(self, host):
return self._ssl_factory.get_https_connection(host)
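# Hedged sketch (illustrative only): installs both keep-alive handlers
# globally, mirroring the pattern shown in the module docstring.
def _example_install_keepalive():
    opener = urllib2.build_opener(HTTPHandler(), HTTPSHandler())
    urllib2.install_opener(opener)
    return opener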
class HTTPResponse(httplib.HTTPResponse):
# we need to subclass HTTPResponse in order to
# 1) add readline() and readlines() methods
# 2) add close_connection() methods
# 3) add info() and geturl() methods
# in order to add readline(), read must be modified to deal with a
# buffer. example: readline must read a buffer and then spit back
# one line at a time. The only real alternative is to read one
# BYTE at a time (ick). Once something has been read, it can't be
# put back (ok, maybe it can, but that's even uglier than this),
# so if you THEN do a normal read, you must first take stuff from
# the buffer.
    # the read method wraps the original to accommodate buffering,
# although read() never adds to the buffer.
# Both readline and readlines have been stolen with almost no
# modification from socket.py
def __init__(self, sock, debuglevel=0, strict=0, method=None):
if method: # the httplib in python 2.3 uses the method arg
httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
else: # 2.2 doesn't
httplib.HTTPResponse.__init__(self, sock, debuglevel)
self.fileno = sock.fileno
self.code = None
self._rbuf = ''
self._rbufsize = 8096
self._handler = None # inserted by the handler later
self._host = None # (same)
self._url = None # (same)
self._connection = None # (same)
_raw_read = httplib.HTTPResponse.read
def close(self):
if self.fp:
self.fp.close()
self.fp = None
if self._handler:
self._handler._request_closed(self, self._host,
self._connection)
def close_connection(self):
self._handler._remove_connection(self._host, self._connection, close=1)
self.close()
def info(self):
return self.headers
def geturl(self):
return self._url
def read(self, amt=None):
# the _rbuf test is only in this first if for speed. It's not
# logically necessary
if self._rbuf and not amt is None:
L = len(self._rbuf)
if amt > L:
amt -= L
else:
s = self._rbuf[:amt]
self._rbuf = self._rbuf[amt:]
return s
s = self._rbuf + self._raw_read(amt)
self._rbuf = ''
return s
def readline(self, limit=-1):
data = ""
i = self._rbuf.find('\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
new = self._raw_read(self._rbufsize)
if not new: break
i = new.find('\n')
if i >= 0: i = i + len(self._rbuf)
self._rbuf = self._rbuf + new
if i < 0: i = len(self._rbuf)
else: i = i+1
if 0 <= limit < len(self._rbuf): i = limit
data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return data
    def readlines(self, sizehint=0):
        total = 0
        lines = []
        while 1:
            line = self.readline()
            if not line: break
            lines.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return lines
class HTTPConnection(httplib.HTTPConnection):
# use the modified response class
response_class = HTTPResponse
class HTTPSConnection(httplib.HTTPSConnection):
response_class = HTTPResponse
#########################################################################
##### TEST FUNCTIONS
#########################################################################
def error_handler(url):
global HANDLE_ERRORS
orig = HANDLE_ERRORS
keepalive_handler = HTTPHandler()
opener = urllib2.build_opener(keepalive_handler)
urllib2.install_opener(opener)
pos = {0: 'off', 1: 'on'}
for i in (0, 1):
print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
HANDLE_ERRORS = i
try:
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
try: status, reason = fo.status, fo.reason
except AttributeError: status, reason = None, None
except IOError, e:
print " EXCEPTION: %s" % e
raise
else:
print " status = %s, reason = %s" % (status, reason)
HANDLE_ERRORS = orig
hosts = keepalive_handler.open_connections()
print "open connections:", hosts
keepalive_handler.close_all()
def continuity(url):
import md5
format = '%25s: %s'
# first fetch the file with the normal http handler
opener = urllib2.build_opener()
urllib2.install_opener(opener)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
m = md5.new(foo)
print format % ('normal urllib', m.hexdigest())
# now install the keepalive handler and try again
opener = urllib2.build_opener(HTTPHandler())
urllib2.install_opener(opener)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
m = md5.new(foo)
print format % ('keepalive read', m.hexdigest())
fo = urllib2.urlopen(url)
foo = ''
while 1:
f = fo.readline()
if f: foo = foo + f
else: break
fo.close()
m = md5.new(foo)
print format % ('keepalive readline', m.hexdigest())
def comp(N, url):
print ' making %i connections to:\n %s' % (N, url)
sys.stdout.write(' first using the normal urllib handlers')
# first use normal opener
opener = urllib2.build_opener()
urllib2.install_opener(opener)
t1 = fetch(N, url)
print ' TIME: %.3f s' % t1
sys.stdout.write(' now using the keepalive handler ')
# now install the keepalive handler and try again
opener = urllib2.build_opener(HTTPHandler())
urllib2.install_opener(opener)
t2 = fetch(N, url)
print ' TIME: %.3f s' % t2
print ' improvement factor: %.2f' % (t1/t2, )
def fetch(N, url, delay=0):
import time
lens = []
starttime = time.time()
for i in range(N):
if delay and i > 0: time.sleep(delay)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
lens.append(len(foo))
diff = time.time() - starttime
j = 0
for i in lens[1:]:
j = j + 1
if not i == lens[0]:
print "WARNING: inconsistent length on read %i: %i" % (j, i)
return diff
def test_timeout(url):
global DEBUG
dbbackup = DEBUG
class FakeLogger:
def debug(self, msg, *args): print msg % args
info = warning = error = debug
DEBUG = FakeLogger()
print " fetching the file to establish a connection"
fo = urllib2.urlopen(url)
data1 = fo.read()
fo.close()
i = 20
print " waiting %i seconds for the server to close the connection" % i
while i > 0:
sys.stdout.write('\r %2i' % i)
sys.stdout.flush()
time.sleep(1)
i -= 1
sys.stderr.write('\r')
print " fetching the file a second time"
fo = urllib2.urlopen(url)
data2 = fo.read()
fo.close()
if data1 == data2:
print ' data are identical'
else:
print ' ERROR: DATA DIFFER'
DEBUG = dbbackup
def test(url, N=10):
print "checking error hander (do this on a non-200)"
try: error_handler(url)
except IOError, e:
print "exiting - exception will prevent further tests"
sys.exit()
print
print "performing continuity test (making sure stuff isn't corrupted)"
continuity(url)
print
print "performing speed comparison"
comp(N, url)
print
print "performing dropped-connection check"
test_timeout(url)
if __name__ == '__main__':
import time
import sys
try:
N = int(sys.argv[1])
url = sys.argv[2]
except:
print "%s <integer> <url>" % sys.argv[0]
else:
test(url, N)
########NEW FILE########
__FILENAME__ = mirror
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
"""Module for downloading files from a pool of mirrors
DESCRIPTION
This module provides support for downloading files from a pool of
mirrors with configurable failover policies. To a large extent, the
failover policy is chosen by using different classes derived from
the main class, MirrorGroup.
Instances of MirrorGroup (and cousins) act very much like URLGrabber
instances in that they have urlread, urlgrab, and urlopen methods.
They can therefore, be used in very similar ways.
from urlgrabber.grabber import URLGrabber
from urlgrabber.mirror import MirrorGroup
gr = URLGrabber()
mg = MirrorGroup(gr, ['http://foo.com/some/directory/',
'http://bar.org/maybe/somewhere/else/',
                        'ftp://baz.net/some/other/place/entirely/'])
mg.urlgrab('relative/path.zip')
The assumption is that all mirrors are identical AFTER the base urls
specified, so that any mirror can be used to fetch any file.
FAILOVER
The failover mechanism is designed to be customized by subclassing
from MirrorGroup to change the details of the behavior. In general,
the classes maintain a master mirror list and a "current mirror"
index. When a download is initiated, a copy of this list and index
is created for that download only. The specific failover policy
depends on the class used, and so is documented in the class
documentation. Note that ANY behavior of the class can be
overridden, so any failover policy at all is possible (although
you may need to change the interface in extreme cases).
CUSTOMIZATION
Most customization of a MirrorGroup object is done at instantiation
time (or via subclassing). There are four major types of
customization:
1) Pass in a custom urlgrabber - The passed in urlgrabber will be
used (by default... see #2) for the grabs, so options to it
apply for the url-fetching
2) Custom mirror list - Mirror lists can simply be a list of
       mirror strings (as shown in the example above) but each can
also be a dict, allowing for more options. For example, the
first mirror in the list above could also have been:
{'mirror': 'http://foo.com/some/directory/',
'grabber': <a custom grabber to be used for this mirror>,
'kwargs': { <a dict of arguments passed to the grabber> }}
All mirrors are converted to this format internally. If
'grabber' is omitted, the default grabber will be used. If
kwargs are omitted, then (duh) they will not be used.
3) Pass keyword arguments when instantiating the mirror group.
See, for example, the failure_callback argument.
4) Finally, any kwargs passed in for the specific file (to the
urlgrab method, for example) will be folded in. The options
passed into the grabber's urlXXX methods will override any
options specified in a custom mirror dict.
"""
# $Id: mirror.py,v 1.14 2006/02/22 18:26:46 mstenner Exp $
import random
import thread # needed for locking to make this threadsafe
from grabber import URLGrabError, CallbackObject, DEBUG
try:
from i18n import _
except ImportError, msg:
def _(st): return st
class GrabRequest:
"""This is a dummy class used to hold information about the specific
request. For example, a single file. By maintaining this information
separately, we can accomplish two things:
1) make it a little easier to be threadsafe
2) have request-specific parameters
"""
pass
class MirrorGroup:
"""Base Mirror class
Instances of this class are built with a grabber object and a list
of mirrors. Then all calls to urlXXX should be passed relative urls.
The requested file will be searched for on the first mirror. If the
grabber raises an exception (possibly after some retries) then that
mirror will be removed from the list, and the next will be attempted.
If all mirrors are exhausted, then an exception will be raised.
MirrorGroup has the following failover policy:
* downloads begin with the first mirror
* by default (see default_action below) a failure (after retries)
causes it to increment the local AND master indices. Also,
the current mirror is removed from the local list (but NOT the
master list - the mirror can potentially be used for other
files)
* if the local list is ever exhausted, a URLGrabError will be
raised (errno=256, no more mirrors)
OPTIONS
In addition to the required arguments "grabber" and "mirrors",
MirrorGroup also takes the following optional arguments:
default_action
A dict that describes the actions to be taken upon failure
(after retries). default_action can contain any of the
following keys (shown here with their default values):
default_action = {'increment': 1,
'increment_master': 1,
'remove': 1,
'remove_master': 0,
'fail': 0}
In this context, 'increment' means "use the next mirror" and
'remove' means "never use this mirror again". The two
'master' values refer to the instance-level mirror list (used
for all files), whereas the non-master values refer to the
current download only.
The 'fail' option will cause immediate failure by re-raising
the exception and no further attempts to get the current
download.
This dict can be set at instantiation time,
mg = MirrorGroup(grabber, mirrors, default_action={'fail':1})
at method-execution time (only applies to current fetch),
filename = mg.urlgrab(url, default_action={'increment': 0})
or by returning an action dict from the failure_callback
return {'fail':0}
in increasing precedence.
If all three of these were done, the net result would be:
{'increment': 0, # set in method
'increment_master': 1, # class default
'remove': 1, # class default
'remove_master': 0, # class default
'fail': 0} # set at instantiation, reset
# from callback
failure_callback
this is a callback that will be called when a mirror "fails",
meaning the grabber raises some URLGrabError. If this is a
tuple, it is interpreted to be of the form (cb, args, kwargs)
where cb is the actual callable object (function, method,
etc). Otherwise, it is assumed to be the callable object
itself. The callback will be passed a grabber.CallbackObject
instance along with args and kwargs (if present). The following
      attributes are defined within the instance:
obj.exception = < exception that was raised >
obj.mirror = < the mirror that was tried >
obj.relative_url = < url relative to the mirror >
obj.url = < full url that failed >
# .url is just the combination of .mirror
# and .relative_url
The failure callback can return an action dict, as described
above.
Like default_action, the failure_callback can be set at
instantiation time or when the urlXXX method is called. In
the latter case, it applies only for that fetch.
The callback can re-raise the exception quite easily. For
example, this is a perfectly adequate callback function:
def callback(obj): raise obj.exception
WARNING: do not save the exception object (or the
CallbackObject instance). As they contain stack frame
references, they can lead to circular references.
Notes:
* The behavior can be customized by deriving and overriding the
'CONFIGURATION METHODS'
* The 'grabber' instance is kept as a reference, not copied.
Therefore, the grabber instance can be modified externally
and changes will take effect immediately.
"""
# notes on thread-safety:
# A GrabRequest should never be shared by multiple threads because
# it's never saved inside the MG object and never returned outside it.
# therefore, it should be safe to access/modify grabrequest data
# without a lock. However, accessing the mirrors and _next attributes
# of the MG itself must be done when locked to prevent (for example)
# removal of the wrong mirror.
##############################################################
# CONFIGURATION METHODS - intended to be overridden to
# customize behavior
def __init__(self, grabber, mirrors, **kwargs):
"""Initialize the MirrorGroup object.
REQUIRED ARGUMENTS
grabber - URLGrabber instance
mirrors - a list of mirrors
OPTIONAL ARGUMENTS
failure_callback - callback to be used when a mirror fails
default_action - dict of failure actions
See the module-level and class level documentation for more
details.
"""
# OVERRIDE IDEAS:
# shuffle the list to randomize order
self.grabber = grabber
self.mirrors = self._parse_mirrors(mirrors)
self._next = 0
self._lock = thread.allocate_lock()
self.default_action = None
self._process_kwargs(kwargs)
# if these values are found in **kwargs passed to one of the urlXXX
# methods, they will be stripped before getting passed on to the
# grabber
options = ['default_action', 'failure_callback']
def _process_kwargs(self, kwargs):
self.failure_callback = kwargs.get('failure_callback')
self.default_action = kwargs.get('default_action')
def _parse_mirrors(self, mirrors):
parsed_mirrors = []
for m in mirrors:
if type(m) == type(''): m = {'mirror': m}
parsed_mirrors.append(m)
return parsed_mirrors
def _load_gr(self, gr):
# OVERRIDE IDEAS:
# shuffle gr list
self._lock.acquire()
gr.mirrors = list(self.mirrors)
gr._next = self._next
self._lock.release()
def _get_mirror(self, gr):
# OVERRIDE IDEAS:
# return a random mirror so that multiple mirrors get used
# even without failures.
if not gr.mirrors:
raise URLGrabError(256, _('No more mirrors to try.'))
return gr.mirrors[gr._next]
def _failure(self, gr, cb_obj):
# OVERRIDE IDEAS:
# inspect the error - remove=1 for 404, remove=2 for connection
# refused, etc. (this can also be done via
# the callback)
cb = gr.kw.get('failure_callback') or self.failure_callback
if cb:
if type(cb) == type( () ):
cb, args, kwargs = cb
else:
args, kwargs = (), {}
action = cb(cb_obj, *args, **kwargs) or {}
else:
action = {}
# XXXX - decide - there are two ways to do this
# the first is action-overriding as a whole - use the entire action
# or fall back on module level defaults
#action = action or gr.kw.get('default_action') or self.default_action
# the other is to fall through for each element in the action dict
a = dict(self.default_action or {})
a.update(gr.kw.get('default_action', {}))
a.update(action)
action = a
self.increment_mirror(gr, action)
if action and action.get('fail', 0): raise
def increment_mirror(self, gr, action={}):
"""Tell the mirror object increment the mirror index
This increments the mirror index, which amounts to telling the
mirror object to use a different mirror (for this and future
downloads).
This is a SEMI-public method. It will be called internally,
and you may never need to call it. However, it is provided
(and is made public) so that the calling program can increment
the mirror choice for methods like urlopen. For example, with
urlopen, there's no good way for the mirror group to know that
an error occurs mid-download (it's already returned and given
you the file object).
remove --- can have several values
0 do not remove the mirror from the list
1 remove the mirror for this download only
2 remove the mirror permanently
beware of remove=0 as it can lead to infinite loops
"""
badmirror = gr.mirrors[gr._next]
self._lock.acquire()
try:
ind = self.mirrors.index(badmirror)
except ValueError:
pass
else:
if action.get('remove_master', 0):
del self.mirrors[ind]
elif self._next == ind and action.get('increment_master', 1):
self._next += 1
if self._next >= len(self.mirrors): self._next = 0
self._lock.release()
if action.get('remove', 1):
del gr.mirrors[gr._next]
elif action.get('increment', 1):
gr._next += 1
if gr._next >= len(gr.mirrors): gr._next = 0
if DEBUG:
grm = [m['mirror'] for m in gr.mirrors]
DEBUG.info('GR mirrors: [%s] %i', ' '.join(grm), gr._next)
selfm = [m['mirror'] for m in self.mirrors]
DEBUG.info('MAIN mirrors: [%s] %i', ' '.join(selfm), self._next)
#####################################################################
# NON-CONFIGURATION METHODS
# these methods are designed to be largely workhorse methods that
# are not intended to be overridden. That doesn't mean you can't;
# if you want to, feel free, but most things can be done by
# by overriding the configuration methods :)
def _join_url(self, base_url, rel_url):
if base_url.endswith('/') or rel_url.startswith('/'):
return base_url + rel_url
else:
return base_url + '/' + rel_url
def _mirror_try(self, func, url, kw):
gr = GrabRequest()
gr.func = func
gr.url = url
gr.kw = dict(kw)
self._load_gr(gr)
for k in self.options:
try: del kw[k]
except KeyError: pass
while 1:
mirrorchoice = self._get_mirror(gr)
fullurl = self._join_url(mirrorchoice['mirror'], gr.url)
kwargs = dict(mirrorchoice.get('kwargs', {}))
kwargs.update(kw)
grabber = mirrorchoice.get('grabber') or self.grabber
func_ref = getattr(grabber, func)
if DEBUG: DEBUG.info('MIRROR: trying %s -> %s', url, fullurl)
try:
return func_ref( *(fullurl,), **kwargs )
except URLGrabError, e:
if DEBUG: DEBUG.info('MIRROR: failed')
obj = CallbackObject()
obj.exception = e
obj.mirror = mirrorchoice['mirror']
obj.relative_url = gr.url
obj.url = fullurl
self._failure(gr, obj)
def urlgrab(self, url, filename=None, **kwargs):
kw = dict(kwargs)
kw['filename'] = filename
func = 'urlgrab'
return self._mirror_try(func, url, kw)
def urlopen(self, url, **kwargs):
kw = dict(kwargs)
func = 'urlopen'
return self._mirror_try(func, url, kw)
def urlread(self, url, limit=None, **kwargs):
kw = dict(kwargs)
kw['limit'] = limit
func = 'urlread'
return self._mirror_try(func, url, kw)
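# Hedged sketch (added for illustration; not from the original module): builds
# a MirrorGroup with one plain mirror, one per-mirror kwargs dict, and a
# failure callback that aborts instead of failing over.  The grabber is
# caller-supplied and all URLs are hypothetical placeholders.
def _example_mirrorgroup_usage(grabber):
    def abort_on_failure(cb_obj):
        raise cb_obj.exception   # re-raise: no failover for this fetch
    mirrors = ['http://mirror-a.example.org/pub/',
               {'mirror': 'http://mirror-b.example.org/pub/',
                'kwargs': {'retry': 2}}]
    return MirrorGroup(grabber, mirrors, failure_callback=abort_on_failure)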
class MGRandomStart(MirrorGroup):
"""A mirror group that starts at a random mirror in the list.
    The behavior of this class is identical to MirrorGroup, except that
it starts at a random location in the mirror list.
"""
def __init__(self, grabber, mirrors, **kwargs):
"""Initialize the object
        The arguments for initialization are the same as for MirrorGroup
"""
MirrorGroup.__init__(self, grabber, mirrors, **kwargs)
self._next = random.randrange(len(mirrors))
class MGRandomOrder(MirrorGroup):
"""A mirror group that uses mirrors in a random order.
    The behavior of this class is identical to MirrorGroup, except that
it uses the mirrors in a random order. Note that the order is set at
initialization time and fixed thereafter. That is, it does not pick a
random mirror after each failure.
"""
def __init__(self, grabber, mirrors, **kwargs):
"""Initialize the object
        The arguments for initialization are the same as for MirrorGroup
"""
MirrorGroup.__init__(self, grabber, mirrors, **kwargs)
random.shuffle(self.mirrors)
if __name__ == '__main__':
pass
########NEW FILE########
__FILENAME__ = progress
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# $Id: progress.py,v 1.7 2005/08/19 21:59:07 mstenner Exp $
import sys
import time
import math
import thread
class BaseMeter:
def __init__(self):
self.update_period = 0.3 # seconds
self.filename = None
self.url = None
self.basename = None
self.text = None
self.size = None
self.start_time = None
self.last_amount_read = 0
self.last_update_time = None
self.re = RateEstimator()
def start(self, filename=None, url=None, basename=None,
size=None, now=None, text=None):
self.filename = filename
self.url = url
self.basename = basename
self.text = text
#size = None ######### TESTING
self.size = size
if not size is None: self.fsize = format_number(size) + 'B'
if now is None: now = time.time()
self.start_time = now
self.re.start(size, now)
self.last_amount_read = 0
self.last_update_time = now
self._do_start(now)
def _do_start(self, now=None):
pass
def update(self, amount_read, now=None):
# for a real gui, you probably want to override and put a call
# to your mainloop iteration function here
if now is None: now = time.time()
if (now >= self.last_update_time + self.update_period) or \
not self.last_update_time:
self.re.update(amount_read, now)
self.last_amount_read = amount_read
self.last_update_time = now
self._do_update(amount_read, now)
def _do_update(self, amount_read, now=None):
pass
def end(self, amount_read, now=None):
if now is None: now = time.time()
self.re.update(amount_read, now)
self.last_amount_read = amount_read
self.last_update_time = now
self._do_end(amount_read, now)
def _do_end(self, amount_read, now=None):
pass
class TextMeter(BaseMeter):
def __init__(self, fo=sys.stderr):
BaseMeter.__init__(self)
self.fo = fo
def _do_update(self, amount_read, now=None):
etime = self.re.elapsed_time()
fetime = format_time(etime)
fread = format_number(amount_read)
#self.size = None
if self.text is not None:
text = self.text
else:
text = self.basename
if self.size is None:
out = '\r%-60.60s %5sB %s ' % \
(text, fread, fetime)
else:
rtime = self.re.remaining_time()
frtime = format_time(rtime)
frac = self.re.fraction_read()
bar = '='*int(25 * frac)
out = '\r%-25.25s %3i%% |%-25.25s| %5sB %8s ETA ' % \
(text, frac*100, bar, fread, frtime)
self.fo.write(out)
self.fo.flush()
def _do_end(self, amount_read, now=None):
total_time = format_time(self.re.elapsed_time())
total_size = format_number(amount_read)
if self.text is not None:
text = self.text
else:
text = self.basename
if self.size is None:
out = '\r%-60.60s %5sB %s ' % \
(text, total_size, total_time)
else:
bar = '='*25
out = '\r%-25.25s %3i%% |%-25.25s| %5sB %8s ' % \
(text, 100, bar, total_size, total_time)
self.fo.write(out + '\n')
self.fo.flush()
text_progress_meter = TextMeter
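# Hedged sketch (illustrative only): drives a TextMeter by hand with
# synthetic timestamps; normally URLGrabber calls these hooks itself.
def _example_text_meter():
    tm = TextMeter()
    tm.start(filename='a.bin', url='http://example.org/a.bin',
             basename='a.bin', size=1024, now=0.0)
    tm.update(512, now=1.0)    # prints the 50% progress bar line
    tm.end(1024, now=2.0)      # prints the completed line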
class MultiFileHelper(BaseMeter):
def __init__(self, master):
BaseMeter.__init__(self)
self.master = master
def _do_start(self, now):
self.master.start_meter(self, now)
def _do_update(self, amount_read, now):
# elapsed time since last update
self.master.update_meter(self, now)
def _do_end(self, amount_read, now):
self.ftotal_time = format_time(now - self.start_time)
self.ftotal_size = format_number(self.last_amount_read)
self.master.end_meter(self, now)
def failure(self, message, now=None):
self.master.failure_meter(self, message, now)
def message(self, message):
self.master.message_meter(self, message)
class MultiFileMeter:
helperclass = MultiFileHelper
def __init__(self):
self.meters = []
self.in_progress_meters = []
self._lock = thread.allocate_lock()
self.update_period = 0.3 # seconds
self.numfiles = None
self.finished_files = 0
self.failed_files = 0
self.open_files = 0
self.total_size = None
self.failed_size = 0
self.start_time = None
self.finished_file_size = 0
self.last_update_time = None
self.re = RateEstimator()
def start(self, numfiles=None, total_size=None, now=None):
if now is None: now = time.time()
self.numfiles = numfiles
self.finished_files = 0
self.failed_files = 0
self.open_files = 0
self.total_size = total_size
self.failed_size = 0
self.start_time = now
self.finished_file_size = 0
self.last_update_time = now
self.re.start(total_size, now)
self._do_start(now)
def _do_start(self, now):
pass
def end(self, now=None):
if now is None: now = time.time()
self._do_end(now)
def _do_end(self, now):
pass
def lock(self): self._lock.acquire()
def unlock(self): self._lock.release()
###########################################################
# child meter creation and destruction
def newMeter(self):
newmeter = self.helperclass(self)
self.meters.append(newmeter)
return newmeter
def removeMeter(self, meter):
self.meters.remove(meter)
###########################################################
# child functions - these should only be called by helpers
def start_meter(self, meter, now):
if not meter in self.meters:
raise ValueError('attempt to use orphaned meter')
self._lock.acquire()
try:
if not meter in self.in_progress_meters:
self.in_progress_meters.append(meter)
self.open_files += 1
finally:
self._lock.release()
self._do_start_meter(meter, now)
def _do_start_meter(self, meter, now):
pass
def update_meter(self, meter, now):
if not meter in self.meters:
raise ValueError('attempt to use orphaned meter')
if (now >= self.last_update_time + self.update_period) or \
not self.last_update_time:
self.re.update(self._amount_read(), now)
self.last_update_time = now
self._do_update_meter(meter, now)
def _do_update_meter(self, meter, now):
pass
def end_meter(self, meter, now):
if not meter in self.meters:
raise ValueError('attempt to use orphaned meter')
self._lock.acquire()
try:
try: self.in_progress_meters.remove(meter)
except ValueError: pass
self.open_files -= 1
self.finished_files += 1
self.finished_file_size += meter.last_amount_read
finally:
self._lock.release()
self._do_end_meter(meter, now)
def _do_end_meter(self, meter, now):
pass
def failure_meter(self, meter, message, now):
if not meter in self.meters:
raise ValueError('attempt to use orphaned meter')
self._lock.acquire()
try:
try: self.in_progress_meters.remove(meter)
except ValueError: pass
self.open_files -= 1
self.failed_files += 1
if meter.size and self.failed_size is not None:
self.failed_size += meter.size
else:
self.failed_size = None
finally:
self._lock.release()
self._do_failure_meter(meter, message, now)
def _do_failure_meter(self, meter, message, now):
pass
def message_meter(self, meter, message):
pass
########################################################
# internal functions
def _amount_read(self):
tot = self.finished_file_size
for m in self.in_progress_meters:
tot += m.last_amount_read
return tot
class TextMultiFileMeter(MultiFileMeter):
def __init__(self, fo=sys.stderr):
self.fo = fo
MultiFileMeter.__init__(self)
# files: ###/### ###% data: ######/###### ###% time: ##:##:##/##:##:##
def _do_update_meter(self, meter, now):
self._lock.acquire()
try:
format = "files: %3i/%-3i %3i%% data: %6.6s/%-6.6s %3i%% " \
"time: %8.8s/%8.8s"
df = self.finished_files
tf = self.numfiles or 1
pf = 100 * float(df)/tf + 0.49
dd = self.re.last_amount_read
td = self.total_size
pd = 100 * (self.re.fraction_read() or 0) + 0.49
dt = self.re.elapsed_time()
rt = self.re.remaining_time()
if rt is None: tt = None
else: tt = dt + rt
fdd = format_number(dd) + 'B'
ftd = format_number(td) + 'B'
fdt = format_time(dt, 1)
ftt = format_time(tt, 1)
out = '%-79.79s' % (format % (df, tf, pf, fdd, ftd, pd, fdt, ftt))
self.fo.write('\r' + out)
self.fo.flush()
finally:
self._lock.release()
def _do_end_meter(self, meter, now):
self._lock.acquire()
try:
format = "%-30.30s %6.6s %8.8s %9.9s"
fn = meter.basename
size = meter.last_amount_read
fsize = format_number(size) + 'B'
et = meter.re.elapsed_time()
fet = format_time(et, 1)
frate = format_number(size / et) + 'B/s'
out = '%-79.79s' % (format % (fn, fsize, fet, frate))
self.fo.write('\r' + out + '\n')
finally:
self._lock.release()
self._do_update_meter(meter, now)
def _do_failure_meter(self, meter, message, now):
self._lock.acquire()
try:
format = "%-30.30s %6.6s %s"
fn = meter.basename
if type(message) in (type(''), type(u'')):
message = message.splitlines()
if not message: message = ['']
out = '%-79s' % (format % (fn, 'FAILED', message[0] or ''))
self.fo.write('\r' + out + '\n')
            for m in message[1:]: self.fo.write(' ' + m + '\n')
        finally:
            self._lock.release()
        self._do_update_meter(meter, now)
def message_meter(self, meter, message):
self._lock.acquire()
try:
pass
finally:
self._lock.release()
def _do_end(self, now):
self._do_update_meter(None, now)
self._lock.acquire()
try:
self.fo.write('\n')
self.fo.flush()
finally:
self._lock.release()
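# Hedged sketch (added for illustration; not part of the original module):
# drives a TextMultiFileMeter by hand, the way a multi-file downloader would.
def _example_multifile_meter():
    master = TextMultiFileMeter()
    master.start(numfiles=2, total_size=2048, now=0.0)
    m = master.newMeter()
    m.start(filename='a.bin', url='http://example.org/a.bin',
            basename='a.bin', size=1024, now=0.0)
    m.update(512, now=1.0)     # master line shows 25% of the total data
    m.end(1024, now=2.0)       # per-file summary line for a.bin
    master.end(now=2.0)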
######################################################################
# support classes and functions
class RateEstimator:
def __init__(self, timescale=5.0):
self.timescale = timescale
def start(self, total=None, now=None):
if now is None: now = time.time()
self.total = total
self.start_time = now
self.last_update_time = now
self.last_amount_read = 0
self.ave_rate = None
def update(self, amount_read, now=None):
if now is None: now = time.time()
if amount_read == 0:
# if we just started this file, all bets are off
self.last_update_time = now
self.last_amount_read = 0
self.ave_rate = None
return
#print 'times', now, self.last_update_time
time_diff = now - self.last_update_time
read_diff = amount_read - self.last_amount_read
self.last_update_time = now
self.last_amount_read = amount_read
self.ave_rate = self._temporal_rolling_ave(\
time_diff, read_diff, self.ave_rate, self.timescale)
#print 'results', time_diff, read_diff, self.ave_rate
#####################################################################
# result methods
def average_rate(self):
"get the average transfer rate (in bytes/second)"
return self.ave_rate
def elapsed_time(self):
"the time between the start of the transfer and the most recent update"
return self.last_update_time - self.start_time
def remaining_time(self):
"estimated time remaining"
if not self.ave_rate or not self.total: return None
return (self.total - self.last_amount_read) / self.ave_rate
def fraction_read(self):
"""the fraction of the data that has been read
(can be None for unknown transfer size)"""
if self.total is None: return None
elif self.total == 0: return 1.0
else: return float(self.last_amount_read)/self.total
#########################################################################
# support methods
def _temporal_rolling_ave(self, time_diff, read_diff, last_ave, timescale):
"""a temporal rolling average performs smooth averaging even when
updates come at irregular intervals. This is performed by scaling
the "epsilon" according to the time since the last update.
Specifically, epsilon = time_diff / timescale
As a general rule, the average will take on a completely new value
after 'timescale' seconds."""
epsilon = time_diff / timescale
if epsilon > 1: epsilon = 1.0
return self._rolling_ave(time_diff, read_diff, last_ave, epsilon)
def _rolling_ave(self, time_diff, read_diff, last_ave, epsilon):
"""perform a "rolling average" iteration
a rolling average "folds" new data into an existing average with
some weight, epsilon. epsilon must be between 0.0 and 1.0 (inclusive)
a value of 0.0 means only the old value (initial value) counts,
and a value of 1.0 means only the newest value is considered."""
try:
recent_rate = read_diff / time_diff
except ZeroDivisionError:
recent_rate = None
if last_ave is None: return recent_rate
elif recent_rate is None: return last_ave
# at this point, both last_ave and recent_rate are numbers
return epsilon * recent_rate + (1 - epsilon) * last_ave
def _round_remaining_time(self, rt, start_time=15.0):
"""round the remaining time, depending on its size
If rt is between n*start_time and (n+1)*start_time round downward
to the nearest multiple of n (for any counting number n).
If rt < start_time, round down to the nearest 1.
For example (for start_time = 15.0):
2.7 -> 2.0
25.2 -> 25.0
26.4 -> 26.0
35.3 -> 34.0
63.6 -> 60.0
"""
        if rt <= 0: return 0.0  # also guards math.log() against rt == 0
shift = int(math.log(rt/start_time)/math.log(2))
rt = int(rt)
if shift <= 0: return rt
return float(int(rt) >> shift << shift)
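# Hedged numeric sketch (illustrative only): shows the temporal rolling
# average converging toward new rates.  With timescale=5.0, an update one
# second after the previous one uses epsilon = 0.2.
def _example_rolling_average():
    re_ = RateEstimator(timescale=5.0)
    re_.start(total=1000, now=0.0)
    re_.update(100, now=1.0)   # first sample: rate = 100 B/s
    re_.update(300, now=2.0)   # 0.2 * 200 + 0.8 * 100 = 120 B/s
    return re_.average_rate()  # 120.0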
def format_time(seconds, use_hours=0):
if seconds is None or seconds < 0:
if use_hours: return '--:--:--'
else: return '--:--'
else:
seconds = int(seconds)
minutes = seconds / 60
seconds = seconds % 60
if use_hours:
hours = minutes / 60
minutes = minutes % 60
return '%02i:%02i:%02i' % (hours, minutes, seconds)
else:
return '%02i:%02i' % (minutes, seconds)
def format_number(number, SI=0, space=' '):
"""Turn numbers into human-readable metric-like numbers"""
symbols = ['', # (none)
'k', # kilo
'M', # mega
'G', # giga
'T', # tera
'P', # peta
'E', # exa
'Z', # zetta
'Y'] # yotta
if SI: step = 1000.0
else: step = 1024.0
thresh = 999
depth = 0
max_depth = len(symbols) - 1
# we want numbers between 0 and thresh, but don't exceed the length
# of our list. In that event, the formatting will be screwed up,
# but it'll still show the right number.
while number > thresh and depth < max_depth:
depth = depth + 1
number = number / step
if type(number) == type(1) or type(number) == type(1L):
# it's an int or a long, which means it didn't get divided,
# which means it's already short enough
format = '%i%s%s'
elif number < 9.95:
# must use 9.95 for proper sizing. For example, 9.99 will be
# rounded to 10.0 with the .1f format string (which is too long)
format = '%.1f%s%s'
else:
format = '%.0f%s%s'
return(format % (float(number or 0), space, symbols[depth]))
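# Hedged sketch (added for illustration; not part of the original module):
# a few concrete format_number() renderings under the default binary step.
def _example_format_number():
    return [format_number(500),            # '500 '  (below the threshold)
            format_number(10240),          # '10 k'
            format_number(5 * 1024 ** 3),  # '5.0 G'
            format_number(1500, SI=1)]     # '1.5 k' (SI uses steps of 1000)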
########NEW FILE########
__FILENAME__ = sslfactory
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
import httplib
import urllib2
try:
from M2Crypto import SSL
from M2Crypto import httpslib
from M2Crypto import m2urllib2
SSL.Connection.clientPostConnectionCheck = None
have_m2crypto = True
except ImportError:
have_m2crypto = False
DEBUG = None
if have_m2crypto:
class M2SSLFactory:
def __init__(self, ssl_ca_cert, ssl_context):
self.ssl_context = self._get_ssl_context(ssl_ca_cert, ssl_context)
def _get_ssl_context(self, ssl_ca_cert, ssl_context):
"""
Create an ssl context using the CA cert file or ssl context.
The CA cert is used first if it was passed as an option. If not,
then the supplied ssl context is used. If no ssl context was supplied,
None is returned.
"""
if ssl_ca_cert:
context = SSL.Context()
context.load_verify_locations(ssl_ca_cert)
context.set_verify(SSL.verify_none, -1)
return context
else:
return ssl_context
def create_https_connection(self, host, response_class = None):
connection = httplib.HTTPSConnection(host, self.ssl_context)
if response_class:
connection.response_class = response_class
return connection
def create_opener(self, *handlers):
return m2urllib2.build_opener(self.ssl_context, *handlers)
class SSLFactory:
def create_https_connection(self, host, response_class = None):
connection = httplib.HTTPSConnection(host)
if response_class:
connection.response_class = response_class
return connection
def create_opener(self, *handlers):
return urllib2.build_opener(*handlers)
def get_factory(ssl_ca_cert = None, ssl_context = None):
""" Return an SSLFactory, based on if M2Crypto is available. """
if have_m2crypto:
return M2SSLFactory(ssl_ca_cert, ssl_context)
else:
# Log here if someone provides the args but we don't use them.
if ssl_ca_cert or ssl_context:
if DEBUG:
DEBUG.warning("SSL arguments supplied, but M2Crypto is not available. "
"Using Python SSL.")
return SSLFactory()
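# Hedged sketch (illustrative only): obtains whichever factory is available
# and builds an opener from it; with M2Crypto absent this is plain urllib2.
def _example_get_factory():
    factory = get_factory(ssl_ca_cert=None)
    return factory.create_opener()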
########NEW FILE########
__FILENAME__ = version
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2006, 2007, 2008, 2009, 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Methods for working with kickstart versions.
This module defines several symbolic constants that specify kickstart syntax
versions. Each version corresponds roughly to one release of Red Hat Linux,
Red Hat Enterprise Linux, or Fedora Core as these are where most syntax
changes take place.
This module also exports several functions:
makeVersion - Given a version number, return an instance of the
matching handler class.
returnClassForVersion - Given a version number, return the matching
handler class. This does not return an
instance of that class, however.
stringToVersion - Convert a string representation of a version number
into the symbolic constant.
versionToString - Perform the reverse mapping.
versionFromFile - Read a kickstart file and determine the version of
syntax it uses. This requires the kickstart file to
have a version= comment in it.
"""
import imputil, re, sys
from urlgrabber import urlopen
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
from pykickstart.errors import KickstartVersionError
# Symbolic names for internal version numbers.
RHEL3 = 900
FC3 = 1000
RHEL4 = 1100
FC4 = 2000
FC5 = 3000
FC6 = 4000
RHEL5 = 4100
F7 = 5000
F8 = 6000
F9 = 7000
F10 = 8000
F11 = 9000
F12 = 10000
F13 = 11000
RHEL6 = 11100
F14 = 12000
F15 = 13000
F16 = 14000
# This always points at the latest version and is the default.
DEVEL = F16
# A one-to-one mapping from string representations to version numbers.
versionMap = {
"DEVEL": DEVEL,
"FC3": FC3, "FC4": FC4, "FC5": FC5, "FC6": FC6, "F7": F7, "F8": F8,
"F9": F9, "F10": F10, "F11": F11, "F12": F12, "F13": F13,
"F14": F14, "F15": F15, "F16": F16,
"RHEL3": RHEL3, "RHEL4": RHEL4, "RHEL5": RHEL5, "RHEL6": RHEL6
}
def stringToVersion(s):
"""Convert string into one of the provided version constants. Raises
KickstartVersionError if string does not match anything.
"""
# First try these short forms.
try:
return versionMap[s.upper()]
except KeyError:
pass
# Now try the Fedora versions.
m = re.match("^fedora.* (\d+)$", s, re.I)
if m and m.group(1):
if versionMap.has_key("FC" + m.group(1)):
return versionMap["FC" + m.group(1)]
elif versionMap.has_key("F" + m.group(1)):
return versionMap["F" + m.group(1)]
else:
raise KickstartVersionError(_("Unsupported version specified: %s") % s)
# Now try the RHEL versions.
m = re.match("^red hat enterprise linux.* (\d+)([\.\d]*)$", s, re.I)
if m and m.group(1):
if versionMap.has_key("RHEL" + m.group(1)):
return versionMap["RHEL" + m.group(1)]
else:
raise KickstartVersionError(_("Unsupported version specified: %s") % s)
# If nothing else worked, we're out of options.
raise KickstartVersionError(_("Unsupported version specified: %s") % s)
def versionToString(version, skipDevel=False):
"""Convert version into a string representation of the version number.
This is the reverse operation of stringToVersion. Raises
KickstartVersionError if version does not match anything.
"""
if not skipDevel and version == versionMap["DEVEL"]:
return "DEVEL"
for (key, val) in versionMap.iteritems():
if key == "DEVEL":
continue
elif val == version:
return key
raise KickstartVersionError(_("Unsupported version specified: %s") % version)
def versionFromFile(f):
"""Given a file or URL, look for a line starting with #version= and
return the version number. If no version is found, return DEVEL.
"""
v = DEVEL
fh = urlopen(f)
while True:
try:
l = fh.readline()
except StopIteration:
break
# At the end of the file?
if l == "":
break
if l.isspace() or l.strip() == "":
continue
if l[:9] == "#version=":
v = stringToVersion(l[9:].rstrip())
break
fh.close()
return v
def returnClassForVersion(version=DEVEL):
"""Return the class of the syntax handler for version. version can be
either a string or the matching constant. Raises KickstartValueError
if version does not match anything.
"""
try:
version = int(version)
module = "%s" % versionToString(version, skipDevel=True)
except ValueError:
module = "%s" % version
version = stringToVersion(version)
module = module.lower()
try:
import pykickstart.handlers
sys.path.extend(pykickstart.handlers.__path__)
found = imputil.imp.find_module(module)
loaded = imputil.imp.load_module(module, found[0], found[1], found[2])
for (k, v) in loaded.__dict__.iteritems():
if k.lower().endswith("%shandler" % module):
return v
except:
raise KickstartVersionError(_("Unsupported version specified: %s") % version)
def makeVersion(version=DEVEL):
"""Return a new instance of the syntax handler for version. version can be
either a string or the matching constant. This function is useful for
standalone programs which just need to handle a specific version of
kickstart syntax (as provided by a command line argument, for example)
and need to instantiate the correct object.
"""
cl = returnClassForVersion(version)
return cl()
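# Hedged sketch (added for illustration; not part of the original module):
# round-trips a release string through the version constants.
def _example_version_roundtrip():
    v = stringToVersion("Fedora release 14")   # matched by the Fedora regex
    assert v == F14
    return versionToString(v)                  # "F14"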
########NEW FILE########
__FILENAME__ = bootstrap
#!/usr/bin/python -tt
#
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import os
import sys
import tempfile
import shutil
import subprocess
import rpm
from mic import msger
from mic.utils import errors, proxy, misc
from mic.utils.rpmmisc import readRpmHeader, RPMInstallCallback
from mic.chroot import cleanup_mounts, setup_chrootenv, cleanup_chrootenv
PATH_BOOTSTRAP = "/usr/sbin:/usr/bin:/sbin:/bin"
RPMTRANS_FLAGS = [
rpm.RPMTRANS_FLAG_ALLFILES,
rpm.RPMTRANS_FLAG_NOSCRIPTS,
rpm.RPMTRANS_FLAG_NOTRIGGERS,
]
RPMVSF_FLAGS = [
rpm._RPMVSF_NOSIGNATURES,
rpm._RPMVSF_NODIGESTS
]
RPMPROB_FLAGS = [
rpm.RPMPROB_FILTER_OLDPACKAGE,
rpm.RPMPROB_FILTER_REPLACEPKG,
rpm.RPMPROB_FILTER_IGNOREARCH
]
class MiniBackend(object):
def __init__(self, rootdir, arch=None, repomd=None):
self._ts = None
self.rootdir = os.path.abspath(rootdir)
self.arch = arch
self.repomd = repomd
self.dlpkgs = []
self.localpkgs = {}
self.optionals = []
self.preins = {}
self.postins = {}
self.scriptlets = False
def __del__(self):
try:
del self.ts
except:
pass
def get_ts(self):
if not self._ts:
self._ts = rpm.TransactionSet(self.rootdir)
self._ts.setFlags(reduce(lambda x, y: x|y, RPMTRANS_FLAGS))
self._ts.setVSFlags(reduce(lambda x, y: x|y, RPMVSF_FLAGS))
self._ts.setProbFilter(reduce(lambda x, y: x|y, RPMPROB_FLAGS))
return self._ts
def del_ts(self):
if self._ts:
self._ts.closeDB()
self._ts = None
ts = property(fget = lambda self: self.get_ts(),
fdel = lambda self: self.del_ts(),
doc="TransactionSet object")
def selectPackage(self, pkg):
if not pkg in self.dlpkgs:
self.dlpkgs.append(pkg)
def runInstall(self):
# FIXME: check space
self.downloadPkgs()
self.installPkgs()
if not self.scriptlets:
return
for pkg in self.preins.keys():
prog, script = self.preins[pkg]
self.run_pkg_script(pkg, prog, script, '0')
for pkg in self.postins.keys():
prog, script = self.postins[pkg]
self.run_pkg_script(pkg, prog, script, '1')
def downloadPkgs(self):
nonexist = []
for pkg in self.dlpkgs:
localpth = misc.get_package(pkg, self.repomd, self.arch)
if localpth:
self.localpkgs[pkg] = localpth
elif pkg in self.optionals:
# skip optional rpm
continue
else:
# mark nonexist rpm
nonexist.append(pkg)
if nonexist:
raise errors.BootstrapError("Can't get rpm binary: %s" %
','.join(nonexist))
def installPkgs(self):
for pkg in self.localpkgs.keys():
rpmpath = self.localpkgs[pkg]
hdr = readRpmHeader(self.ts, rpmpath)
# save prein and postin scripts
self.preins[pkg] = (hdr['PREINPROG'], hdr['PREIN'])
self.postins[pkg] = (hdr['POSTINPROG'], hdr['POSTIN'])
# mark pkg as install
self.ts.addInstall(hdr, rpmpath, 'u')
# run transaction
self.ts.order()
cb = RPMInstallCallback(self.ts)
self.ts.run(cb.callback, '')
def run_pkg_script(self, pkg, prog, script, arg):
mychroot = lambda: os.chroot(self.rootdir)
if not script:
return
if prog == "<lua>":
prog = "/usr/bin/lua"
tmpdir = os.path.join(self.rootdir, "tmp")
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
tmpfd, tmpfp = tempfile.mkstemp(dir=tmpdir, prefix="%s.pre-" % pkg)
script = script.replace('\r', '')
os.write(tmpfd, script)
os.close(tmpfd)
os.chmod(tmpfp, 0700)
try:
script_fp = os.path.join('/tmp', os.path.basename(tmpfp))
subprocess.call([prog, script_fp, arg], preexec_fn=mychroot)
except (OSError, IOError), err:
msger.warning(str(err))
finally:
os.unlink(tmpfp)
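# A rough usage sketch for MiniBackend (the rootdir, arch and package names
# are assumed examples, not real repo data):
#
#   backend = MiniBackend("/var/tmp/bsroot", arch="i686", repomd=repomd)
#   map(backend.selectPackage, ["rpm", "shadow-utils"])
#   backend.runInstall()   # download via misc.get_package, then rpm install
#
# rpm scriptlets are suppressed by RPMTRANS_FLAG_NOSCRIPTS and replayed by
# run_pkg_script only when backend.scriptlets is True.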
class Bootstrap(object):
def __init__(self, rootdir, distro, arch=None):
self.rootdir = misc.mkdtemp(dir=rootdir, prefix=distro)
self.distro = distro
self.arch = arch
self.logfile = None
self.pkgslist = []
self.repomd = None
def __del__(self):
self.cleanup()
def get_rootdir(self):
if os.path.exists(self.rootdir):
shutil.rmtree(self.rootdir, ignore_errors=True)
os.makedirs(self.rootdir)
return self.rootdir
def dirsetup(self, rootdir=None):
_path = lambda pth: os.path.join(rootdir, pth.lstrip('/'))
if not rootdir:
rootdir = self.rootdir
try:
# make /tmp and /etc path
tmpdir = _path('/tmp')
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
etcdir = _path('/etc')
if not os.path.exists(etcdir):
os.makedirs(etcdir)
# touch distro file
tzdist = _path('/etc/%s-release' % self.distro)
if not os.path.exists(tzdist):
with open(tzdist, 'w') as wf:
wf.write("bootstrap")
except:
pass
def create(self, repomd, pkglist, optlist=()):
try:
pkgmgr = MiniBackend(self.get_rootdir())
pkgmgr.arch = self.arch
pkgmgr.repomd = repomd
pkgmgr.optionals = list(optlist)
map(pkgmgr.selectPackage, pkglist + list(optlist))
pkgmgr.runInstall()
except (OSError, IOError, errors.CreatorError), err:
raise errors.BootstrapError("%s" % err)
def run(self, cmd, chdir, rootdir=None, bindmounts=None):
def mychroot():
os.chroot(rootdir)
os.chdir(chdir)
def sync_timesetting(rootdir):
try:
# sync time and zone info to bootstrap
if os.path.exists(rootdir + "/etc/localtime"):
os.unlink(rootdir + "/etc/localtime")
shutil.copyfile("/etc/localtime", rootdir + "/etc/localtime")
except:
pass
def sync_passwdfile(rootdir):
try:
# sync passwd file to bootstrap, saving the user info
if os.path.exists(rootdir + "/etc/passwd"):
os.unlink(rootdir + "/etc/passwd")
shutil.copyfile("/etc/passwd", rootdir + "/etc/passwd")
except:
pass
if not rootdir:
rootdir = self.rootdir
if isinstance(cmd, list):
shell = False
else:
shell = True
env = os.environ
env['PATH'] = "%s:%s" % (PATH_BOOTSTRAP, env['PATH'])
retcode = 0
        globalmounts = None
try:
proxy.set_proxy_environ()
            globalmounts = setup_chrootenv(rootdir, bindmounts, False)
sync_timesetting(rootdir)
sync_passwdfile(rootdir)
retcode = subprocess.call(cmd, preexec_fn=mychroot, env=env, shell=shell)
except (OSError, IOError):
# add additional information to original exception
value, tb = sys.exc_info()[1:]
value = '%s: %s' % (value, ' '.join(cmd))
raise RuntimeError, value, tb
finally:
if self.logfile and os.path.isfile(self.logfile):
msger.log(file(self.logfile).read())
            cleanup_chrootenv(rootdir, bindmounts, globalmounts)
proxy.unset_proxy_environ()
return retcode
def cleanup(self):
try:
# clean mounts
cleanup_mounts(self.rootdir)
# remove rootdir
shutil.rmtree(self.rootdir, ignore_errors=True)
except:
pass
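# A rough usage sketch for Bootstrap (distro and package names are assumed
# examples):
#
#   bs = Bootstrap("/var/tmp", "tizen", arch="i686")
#   bs.create(repomd, ["rpm", "mic"])          # populate the rootdir
#   bs.run(["mic", "create"], chdir="/")       # run a command chrooted
#   bs.cleanup()                               # unmount and remove rootdir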
########NEW FILE########
__FILENAME__ = chroot
#!/usr/bin/python -tt
#
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import os
import shutil
import subprocess
from mic import msger
from mic.conf import configmgr
from mic.utils import misc, errors, runner, fs_related
chroot_lockfd = -1
chroot_lock = ""
BIND_MOUNTS = (
"/proc",
"/proc/sys/fs/binfmt_misc",
"/sys",
"/dev",
"/dev/pts",
"/dev/shm",
"/var/lib/dbus",
"/var/run/dbus",
"/var/lock",
)
def cleanup_after_chroot(targettype, imgmount, tmpdir, tmpmnt):
if imgmount and targettype == "img":
imgmount.cleanup()
if tmpdir:
shutil.rmtree(tmpdir, ignore_errors = True)
if tmpmnt:
shutil.rmtree(tmpmnt, ignore_errors = True)
def check_bind_mounts(chrootdir, bindmounts):
chrootmounts = []
for mount in bindmounts.split(";"):
if not mount:
continue
srcdst = mount.split(":")
if len(srcdst) == 1:
srcdst.append("none")
if not os.path.isdir(srcdst[0]):
return False
if srcdst[1] == "" or srcdst[1] == "none":
srcdst[1] = None
if srcdst[0] in BIND_MOUNTS or srcdst[0] == '/':
continue
if chrootdir:
if not srcdst[1]:
srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[0]))
else:
srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))
tmpdir = chrootdir + "/" + srcdst[1]
if os.path.isdir(tmpdir):
msger.warning("Warning: dir %s has existed." % tmpdir)
return True
def cleanup_mounts(chrootdir):
umountcmd = misc.find_binary_path("umount")
abs_chrootdir = os.path.abspath(chrootdir)
mounts = open('/proc/mounts').readlines()
for line in reversed(mounts):
if abs_chrootdir not in line:
continue
point = line.split()[1]
        # append '/' so that only true subpaths match (avoids a common
        # name prefix, e.g. /mnt/chroot2 vs /mnt/chroot)
if abs_chrootdir == point or point.startswith(abs_chrootdir + '/'):
args = [ umountcmd, "-l", point ]
ret = runner.quiet(args)
if ret != 0:
msger.warning("failed to unmount %s" % point)
return 0
def setup_chrootenv(chrootdir, bindmounts = None, mountparent = True):
global chroot_lockfd, chroot_lock
def get_bind_mounts(chrootdir, bindmounts, mountparent = True):
chrootmounts = []
if bindmounts in ("", None):
bindmounts = ""
for mount in bindmounts.split(";"):
if not mount:
continue
srcdst = mount.split(":")
srcdst[0] = os.path.abspath(os.path.expanduser(srcdst[0]))
if len(srcdst) == 1:
srcdst.append("none")
            # if a bind mount source does not exist, create it here;
            # otherwise it would be created inside the chroot, which is
            # not expected
if not os.path.exists(srcdst[0]):
os.makedirs(srcdst[0])
if not os.path.isdir(srcdst[0]):
continue
if srcdst[0] in BIND_MOUNTS or srcdst[0] == '/':
msger.verbose("%s will be mounted by default." % srcdst[0])
continue
if srcdst[1] == "" or srcdst[1] == "none":
srcdst[1] = None
else:
srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))
if os.path.isdir(chrootdir + "/" + srcdst[1]):
msger.warning("%s has existed in %s , skip it."\
% (srcdst[1], chrootdir))
continue
chrootmounts.append(fs_related.BindChrootMount(srcdst[0],
chrootdir,
srcdst[1]))
"""Default bind mounts"""
for pt in BIND_MOUNTS:
if not os.path.exists(pt):
continue
chrootmounts.append(fs_related.BindChrootMount(pt,
chrootdir,
None))
if mountparent:
chrootmounts.append(fs_related.BindChrootMount("/",
chrootdir,
"/parentroot",
"ro"))
for kernel in os.listdir("/lib/modules"):
chrootmounts.append(fs_related.BindChrootMount(
"/lib/modules/"+kernel,
chrootdir,
None,
"ro"))
return chrootmounts
def bind_mount(chrootmounts):
for b in chrootmounts:
msger.verbose("bind_mount: %s -> %s" % (b.src, b.dest))
b.mount()
def setup_resolv(chrootdir):
try:
shutil.copyfile("/etc/resolv.conf", chrootdir + "/etc/resolv.conf")
except:
pass
globalmounts = get_bind_mounts(chrootdir, bindmounts, mountparent)
bind_mount(globalmounts)
setup_resolv(chrootdir)
mtab = "/etc/mtab"
dstmtab = chrootdir + mtab
if not os.path.islink(dstmtab):
shutil.copyfile(mtab, dstmtab)
chroot_lock = os.path.join(chrootdir, ".chroot.lock")
chroot_lockfd = open(chroot_lock, "w")
return globalmounts
def cleanup_chrootenv(chrootdir, bindmounts=None, globalmounts=()):
global chroot_lockfd, chroot_lock
def bind_unmount(chrootmounts):
for b in reversed(chrootmounts):
msger.verbose("bind_unmount: %s -> %s" % (b.src, b.dest))
b.unmount()
def cleanup_resolv(chrootdir):
try:
fd = open(chrootdir + "/etc/resolv.conf", "w")
fd.truncate(0)
fd.close()
except:
pass
def kill_processes(chrootdir):
import glob
for fp in glob.glob("/proc/*/root"):
try:
if os.readlink(fp) == chrootdir:
pid = int(fp.split("/")[2])
os.kill(pid, 9)
except:
pass
def cleanup_mountdir(chrootdir, bindmounts):
if bindmounts == "" or bindmounts == None:
return
chrootmounts = []
for mount in bindmounts.split(";"):
if not mount:
continue
srcdst = mount.split(":")
if len(srcdst) == 1:
srcdst.append("none")
if srcdst[0] == "/":
continue
if srcdst[1] == "" or srcdst[1] == "none":
srcdst[1] = srcdst[0]
srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))
tmpdir = chrootdir + "/" + srcdst[1]
if os.path.isdir(tmpdir):
if len(os.listdir(tmpdir)) == 0:
shutil.rmtree(tmpdir, ignore_errors = True)
else:
msger.warning("Warning: dir %s isn't empty." % tmpdir)
chroot_lockfd.close()
bind_unmount(globalmounts)
if not fs_related.my_fuser(chroot_lock):
tmpdir = chrootdir + "/parentroot"
if os.path.exists(tmpdir) and len(os.listdir(tmpdir)) == 0:
shutil.rmtree(tmpdir, ignore_errors = True)
cleanup_resolv(chrootdir)
if os.path.exists(chrootdir + "/etc/mtab"):
os.unlink(chrootdir + "/etc/mtab")
kill_processes(chrootdir)
cleanup_mountdir(chrootdir, bindmounts)
def chroot(chrootdir, bindmounts = None, execute = "/bin/bash"):
def mychroot():
os.chroot(chrootdir)
os.chdir("/")
if configmgr.chroot['saveto']:
savefs = True
saveto = configmgr.chroot['saveto']
wrnmsg = "Can't save chroot fs for dir %s exists" % saveto
if saveto == chrootdir:
savefs = False
wrnmsg = "Dir %s is being used to chroot" % saveto
elif os.path.exists(saveto):
if msger.ask("Dir %s already exists, cleanup and continue?" %
saveto):
shutil.rmtree(saveto, ignore_errors = True)
savefs = True
else:
savefs = False
if savefs:
msger.info("Saving image to directory %s" % saveto)
fs_related.makedirs(os.path.dirname(os.path.abspath(saveto)))
runner.quiet("cp -af %s %s" % (chrootdir, saveto))
devs = ['dev/fd',
'dev/stdin',
'dev/stdout',
'dev/stderr',
'etc/mtab']
ignlst = [os.path.join(saveto, x) for x in devs]
map(os.unlink, filter(os.path.exists, ignlst))
else:
msger.warning(wrnmsg)
dev_null = os.open("/dev/null", os.O_WRONLY)
files_to_check = ["/bin/bash", "/sbin/init"]
architecture_found = False
""" Register statically-linked qemu-arm if it is an ARM fs """
qemu_emulator = None
for ftc in files_to_check:
ftc = "%s/%s" % (chrootdir,ftc)
        # The return code of 'file' is "almost always" 0 according to some
        # man pages, so we need to check the file's existence first.
if not os.path.exists(ftc):
continue
for line in runner.outs(['file', ftc]).splitlines():
if 'ARM' in line:
qemu_emulator = misc.setup_qemu_emulator(chrootdir, "arm")
architecture_found = True
break
if 'Intel' in line:
architecture_found = True
break
if architecture_found:
break
os.close(dev_null)
if not architecture_found:
raise errors.CreatorError("Failed to get architecture from any of the "
"following files %s from chroot." \
% files_to_check)
try:
msger.info("Launching shell. Exit to continue.\n"
"----------------------------------")
globalmounts = setup_chrootenv(chrootdir, bindmounts)
subprocess.call(execute, preexec_fn = mychroot, shell=True)
except OSError, err:
raise errors.CreatorError("chroot err: %s" % str(err))
finally:
cleanup_chrootenv(chrootdir, bindmounts, globalmounts)
if qemu_emulator:
os.unlink(chrootdir + qemu_emulator)
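# The bindmounts argument used throughout this module is a ";"-separated
# list of "src:dst" pairs, where dst may be empty or "none" to reuse the
# src path. A sketch with illustrative paths:
#
#   chroot("/var/tmp/myroot", bindmounts="/home/abc:/opt;/tmp")
#
# mounts /home/abc on <root>/opt and /tmp on <root>/tmp, in addition to
# the BIND_MOUNTS defaults.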
########NEW FILE########
__FILENAME__ = conf
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys, re
import ConfigParser
from mic import msger
from mic import kickstart
from mic.utils import misc, runner, proxy, errors
def get_siteconf():
mic_path = os.path.dirname(__file__)
eos = mic_path.find('scripts') + len('scripts')
scripts_path = mic_path[:eos]
return scripts_path + "/lib/image/config/wic.conf"
class ConfigMgr(object):
prefer_backends = ["zypp", "yum"]
DEFAULTS = {'common': {
"distro_name": "Default Distribution",
"plugin_dir": "/usr/lib/wic/plugins", # TODO use prefix also?
},
'create': {
"tmpdir": '/var/tmp/wic',
"cachedir": '/var/tmp/wic/cache',
"outdir": './wic-output',
"arch": None, # None means auto-detect
"pkgmgr": "auto",
"name": "output",
"ksfile": None,
"ks": None,
"repomd": None,
"local_pkgs_path": None,
"release": None,
"logfile": None,
"record_pkgs": [],
"pack_to": None,
"name_prefix": None,
"name_suffix": None,
"proxy": None,
"no_proxy": None,
"copy_kernel": False,
"install_pkgs": None,
"repourl": {},
"localrepos": [], # save localrepos
"runtime": "bootstrap",
},
'chroot': {
"saveto": None,
},
'convert': {
"shell": False,
},
'bootstrap': {
"rootdir": '/var/tmp/wic-bootstrap',
"packages": [],
},
}
# make the manager class as singleton
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(ConfigMgr, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, ksconf=None, siteconf=None):
# reset config options
self.reset()
if not siteconf:
siteconf = get_siteconf()
# initial options from siteconf
self._siteconf = siteconf
if ksconf:
self._ksconf = ksconf
def reset(self):
self.__ksconf = None
self.__siteconf = None
# initialize the values with defaults
for sec, vals in self.DEFAULTS.iteritems():
setattr(self, sec, vals)
def __set_siteconf(self, siteconf):
try:
self.__siteconf = siteconf
self._parse_siteconf(siteconf)
except ConfigParser.Error, error:
raise errors.ConfigError("%s" % error)
def __get_siteconf(self):
return self.__siteconf
_siteconf = property(__get_siteconf, __set_siteconf)
def __set_ksconf(self, ksconf):
if not os.path.isfile(ksconf):
msger.error('Cannot find ks file: %s' % ksconf)
self.__ksconf = ksconf
self._parse_kickstart(ksconf)
def __get_ksconf(self):
return self.__ksconf
_ksconf = property(__get_ksconf, __set_ksconf)
def _parse_siteconf(self, siteconf):
if not siteconf:
return
if not os.path.exists(siteconf):
msger.warning("cannot read config file: %s" % siteconf)
return
parser = ConfigParser.SafeConfigParser()
parser.read(siteconf)
for section in parser.sections():
if section in self.DEFAULTS:
getattr(self, section).update(dict(parser.items(section)))
# append common section items to other sections
for section in self.DEFAULTS.keys():
if section != "common":
getattr(self, section).update(self.common)
# check and normalize the scheme of proxy url
if self.create['proxy']:
m = re.match('^(\w+)://.*', self.create['proxy'])
if m:
scheme = m.group(1)
if scheme not in ('http', 'https', 'ftp', 'socks'):
msger.error("%s: proxy scheme is incorrect" % siteconf)
else:
msger.warning("%s: proxy url w/o scheme, use http as default"
% siteconf)
self.create['proxy'] = "http://" + self.create['proxy']
proxy.set_proxies(self.create['proxy'], self.create['no_proxy'])
# bootstrap option handling
self.set_runtime(self.create['runtime'])
if isinstance(self.bootstrap['packages'], basestring):
packages = self.bootstrap['packages'].replace('\n', ' ')
if packages.find(',') != -1:
packages = packages.split(',')
else:
packages = packages.split()
self.bootstrap['packages'] = packages
def _parse_kickstart(self, ksconf=None):
if not ksconf:
return
ksconf = misc.normalize_ksfile(ksconf,
self.create['release'],
self.create['arch'])
ks = kickstart.read_kickstart(ksconf)
self.create['ks'] = ks
self.create['name'] = os.path.splitext(os.path.basename(ksconf))[0]
self.create['name'] = misc.build_name(ksconf,
self.create['release'],
self.create['name_prefix'],
self.create['name_suffix'])
def set_runtime(self, runtime):
if runtime not in ("bootstrap", "native"):
msger.error("Invalid runtime mode: %s" % runtime)
if misc.get_distro()[0] in ("tizen", "Tizen"):
runtime = "native"
self.create['runtime'] = runtime
configmgr = ConfigMgr()
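# A hypothetical site config as consumed by _parse_siteconf; section names
# must match the DEFAULTS keys, and [common] items are merged into every
# other section:
#
#   [common]
#   distro_name = Tizen
#
#   [create]
#   tmpdir = /var/tmp/wic
#   runtime = bootstrap
#
#   [bootstrap]
#   packages = rpm, mic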
########NEW FILE########
__FILENAME__ = creator
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys, re
from optparse import SUPPRESS_HELP
from mic import msger
from mic.utils import cmdln, errors, rpmmisc
from mic.conf import configmgr
from mic.plugin import pluginmgr
class Creator(cmdln.Cmdln):
"""${name}: create an image
Usage:
${name} SUBCOMMAND <ksfile> [OPTS]
${command_list}
${option_list}
"""
name = 'mic create(cr)'
def __init__(self, *args, **kwargs):
cmdln.Cmdln.__init__(self, *args, **kwargs)
self._subcmds = []
# get cmds from pluginmgr
# mix-in do_subcmd interface
for subcmd, klass in pluginmgr.get_plugins('imager').iteritems():
if not hasattr(klass, 'do_create'):
msger.warning("Unsurpport subcmd: %s" % subcmd)
continue
func = getattr(klass, 'do_create')
setattr(self.__class__, "do_"+subcmd, func)
self._subcmds.append(subcmd)
def get_optparser(self):
optparser = cmdln.CmdlnOptionParser(self)
optparser.add_option('-d', '--debug', action='store_true',
dest='debug',
help=SUPPRESS_HELP)
optparser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help=SUPPRESS_HELP)
optparser.add_option('', '--logfile', type='string', dest='logfile',
default=None,
help='Path of logfile')
optparser.add_option('-c', '--config', type='string', dest='config',
default=None,
help='Specify config file for mic')
optparser.add_option('-k', '--cachedir', type='string', action='store',
dest='cachedir', default=None,
help='Cache directory to store the downloaded')
optparser.add_option('-o', '--outdir', type='string', action='store',
dest='outdir', default=None,
help='Output directory')
optparser.add_option('-A', '--arch', type='string', dest='arch',
default=None,
help='Specify repo architecture')
optparser.add_option('', '--release', type='string', dest='release',
default=None, metavar='RID',
help='Generate a release of RID with all necessary'
' files, when @BUILD_ID@ is contained in '
'kickstart file, it will be replaced by RID')
optparser.add_option("", "--record-pkgs", type="string",
dest="record_pkgs", default=None,
help='Record the info of installed packages, '
'multiple values can be specified which '
'joined by ",", valid values: "name", '
'"content", "license", "vcs"')
optparser.add_option('', '--pkgmgr', type='string', dest='pkgmgr',
default=None,
help='Specify backend package manager')
optparser.add_option('', '--local-pkgs-path', type='string',
dest='local_pkgs_path', default=None,
help='Path for local pkgs(rpms) to be installed')
optparser.add_option('', '--runtime', type='string',
dest='runtime', default=None,
                             help='Specify runtime mode, available: bootstrap, native')
# --taring-to is alias to --pack-to
optparser.add_option('', '--taring-to', type='string',
dest='pack_to', default=None,
help=SUPPRESS_HELP)
optparser.add_option('', '--pack-to', type='string',
dest='pack_to', default=None,
help='Pack the images together into the specified'
                                  ' archive, extension supported: .zip, .tar, '
'.tar.gz, .tar.bz2, etc. by default, .tar '
'will be used')
optparser.add_option('', '--copy-kernel', action='store_true',
dest='copy_kernel',
help='Copy kernel files from image /boot directory'
' to the image output directory.')
optparser.add_option('', '--install-pkgs', type='string', action='store',
dest='install_pkgs', default=None,
help='Specify what type of packages to be installed,'
' valid: source, debuginfo, debugsource')
optparser.add_option('', '--tmpfs', action='store_true', dest='enabletmpfs',
help='Setup tmpdir as tmpfs to accelerate, experimental'
' feature, use it if you have more than 4G memory')
optparser.add_option('', '--repourl', action='append',
dest='repourl', default=[],
help=SUPPRESS_HELP)
return optparser
def preoptparse(self, argv):
optparser = self.get_optparser()
largs = []
rargs = []
while argv:
arg = argv.pop(0)
if arg in ('-h', '--help'):
rargs.append(arg)
elif optparser.has_option(arg):
largs.append(arg)
if optparser.get_option(arg).takes_value():
try:
largs.append(argv.pop(0))
except IndexError:
raise errors.Usage("option %s requires arguments" % arg)
else:
if arg.startswith("--"):
if "=" in arg:
opt = arg.split("=")[0]
else:
opt = None
elif arg.startswith("-") and len(arg) > 2:
opt = arg[0:2]
else:
opt = None
if opt and optparser.has_option(opt):
largs.append(arg)
else:
rargs.append(arg)
return largs + rargs
def postoptparse(self):
abspath = lambda pth: os.path.abspath(os.path.expanduser(pth))
if self.options.verbose:
msger.set_loglevel('verbose')
if self.options.debug:
msger.set_loglevel('debug')
if self.options.logfile:
logfile_abs_path = abspath(self.options.logfile)
if os.path.isdir(logfile_abs_path):
raise errors.Usage("logfile's path %s should be file"
% self.options.logfile)
if not os.path.exists(os.path.dirname(logfile_abs_path)):
os.makedirs(os.path.dirname(logfile_abs_path))
msger.set_interactive(False)
msger.set_logfile(logfile_abs_path)
configmgr.create['logfile'] = self.options.logfile
if self.options.config:
configmgr.reset()
configmgr._siteconf = self.options.config
if self.options.outdir is not None:
configmgr.create['outdir'] = abspath(self.options.outdir)
if self.options.cachedir is not None:
configmgr.create['cachedir'] = abspath(self.options.cachedir)
os.environ['ZYPP_LOCKFILE_ROOT'] = configmgr.create['cachedir']
for cdir in ('outdir', 'cachedir'):
if os.path.exists(configmgr.create[cdir]) \
and not os.path.isdir(configmgr.create[cdir]):
msger.error('Invalid directory specified: %s' \
% configmgr.create[cdir])
if self.options.local_pkgs_path is not None:
if not os.path.exists(self.options.local_pkgs_path):
                msger.error('Local pkgs directory \'%s\' does not exist'
                            % self.options.local_pkgs_path)
configmgr.create['local_pkgs_path'] = self.options.local_pkgs_path
if self.options.release:
configmgr.create['release'] = self.options.release.rstrip('/')
if self.options.record_pkgs:
configmgr.create['record_pkgs'] = []
for infotype in self.options.record_pkgs.split(','):
if infotype not in ('name', 'content', 'license', 'vcs'):
raise errors.Usage('Invalid pkg recording: %s, valid ones:'
' "name", "content", "license", "vcs"' \
% infotype)
configmgr.create['record_pkgs'].append(infotype)
if self.options.arch is not None:
supported_arch = sorted(rpmmisc.archPolicies.keys(), reverse=True)
if self.options.arch in supported_arch:
configmgr.create['arch'] = self.options.arch
else:
raise errors.Usage('Invalid architecture: "%s".\n'
' Supported architectures are: \n'
' %s' % (self.options.arch,
', '.join(supported_arch)))
if self.options.pkgmgr is not None:
configmgr.create['pkgmgr'] = self.options.pkgmgr
if self.options.runtime:
configmgr.set_runtime(self.options.runtime)
if self.options.pack_to is not None:
configmgr.create['pack_to'] = self.options.pack_to
if self.options.copy_kernel:
configmgr.create['copy_kernel'] = self.options.copy_kernel
if self.options.install_pkgs:
configmgr.create['install_pkgs'] = []
for pkgtype in self.options.install_pkgs.split(','):
if pkgtype not in ('source', 'debuginfo', 'debugsource'):
raise errors.Usage('Invalid parameter specified: "%s", '
'valid values: source, debuginfo, '
                                       'debugsource' % pkgtype)
configmgr.create['install_pkgs'].append(pkgtype)
if self.options.enabletmpfs:
configmgr.create['enabletmpfs'] = self.options.enabletmpfs
if self.options.repourl:
for item in self.options.repourl:
try:
key, val = item.split('=')
except:
continue
configmgr.create['repourl'][key] = val
def main(self, argv=None):
if argv is None:
argv = sys.argv
else:
argv = argv[:] # don't modify caller's list
self.optparser = self.get_optparser()
if self.optparser:
try:
argv = self.preoptparse(argv)
self.options, args = self.optparser.parse_args(argv)
except cmdln.CmdlnUserError, ex:
msg = "%s: %s\nTry '%s help' for info.\n"\
% (self.name, ex, self.name)
msger.error(msg)
except cmdln.StopOptionProcessing, ex:
return 0
else:
# optparser=None means no process for opts
self.options, args = None, argv[1:]
if not args:
return self.emptyline()
self.postoptparse()
return self.cmd(args)
def precmd(self, argv): # check help before cmd
if '-h' in argv or '?' in argv or '--help' in argv or 'help' in argv:
return argv
if len(argv) == 1:
return ['help', argv[0]]
return argv
def do_auto(self, subcmd, opts, *args):
"""${cmd_name}: auto detect image type from magic header
Usage:
${name} ${cmd_name} <ksfile>
${cmd_option_list}
"""
def parse_magic_line(re_str, pstr, ptype='mic'):
ptn = re.compile(re_str)
m = ptn.match(pstr)
if not m or not m.groups():
return None
inline_argv = m.group(1).strip()
if ptype == 'mic':
m2 = re.search('(?P<format>\w+)', inline_argv)
elif ptype == 'mic2':
m2 = re.search('(-f|--format(=)?)\s*(?P<format>\w+)',
inline_argv)
else:
return None
if m2:
cmdname = m2.group('format')
inline_argv = inline_argv.replace(m2.group(0), '')
return (cmdname, inline_argv)
return None
if len(args) != 1:
raise errors.Usage("Extra arguments given")
if not os.path.exists(args[0]):
raise errors.CreatorError("Can't find the file: %s" % args[0])
with open(args[0], 'r') as rf:
first_line = rf.readline()
mic_re = '^#\s*-\*-mic-options-\*-\s+(.*)\s+-\*-mic-options-\*-'
mic2_re = '^#\s*-\*-mic2-options-\*-\s+(.*)\s+-\*-mic2-options-\*-'
result = parse_magic_line(mic_re, first_line, 'mic') \
or parse_magic_line(mic2_re, first_line, 'mic2')
if not result:
raise errors.KsError("Invalid magic line in file: %s" % args[0])
if result[0] not in self._subcmds:
raise errors.KsError("Unsupport format '%s' in %s"
% (result[0], args[0]))
argv = ' '.join(result + args).split()
self.main(argv)
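# An example of the magic header do_auto looks for on the kickstart's first
# line (the format name "loop" is illustrative):
#
#   # -*-mic-options-*- loop --arch=i686 -*-mic-options-*-
#
# which dispatches to "mic create loop <ksfile> --arch=i686". The mic2 form
# instead carries "-f <format>" or "--format=<format>" inside the markers.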
########NEW FILE########
__FILENAME__ = baseimager
#!/usr/bin/python -tt
#
# Copyright (c) 2007 Red Hat Inc.
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import os, sys
import stat
import tempfile
import shutil
import subprocess
import re
import tarfile
import glob
import rpm
from mic import kickstart
from mic import msger
from mic.utils.errors import CreatorError, Abort
from mic.utils import misc, grabber, runner, fs_related as fs
class BaseImageCreator(object):
"""Installs a system to a chroot directory.
ImageCreator is the simplest creator class available; it will install and
configure a system image according to the supplied kickstart file.
e.g.
import mic.imgcreate as imgcreate
ks = imgcreate.read_kickstart("foo.ks")
imgcreate.ImageCreator(ks, "foo").create()
"""
def __del__(self):
self.cleanup()
def __init__(self, createopts = None, pkgmgr = None):
"""Initialize an ImageCreator instance.
ks -- a pykickstart.KickstartParser instance; this instance will be
used to drive the install by e.g. providing the list of packages
to be installed, the system configuration and %post scripts
name -- a name for the image; used for e.g. image filenames or
filesystem labels
"""
self.pkgmgr = pkgmgr
self.__builddir = None
self.__bindmounts = []
self.ks = None
self.name = "target"
self.tmpdir = "/var/tmp/wic"
self.cachedir = "/var/tmp/wic/cache"
self.workdir = "/var/tmp/wic/build"
self.destdir = "."
self.installerfw_prefix = "INSTALLERFW_"
self.target_arch = "noarch"
self._local_pkgs_path = None
self.pack_to = None
self.repourl = {}
        # Whether the kernel should be saved to the destdir when the
        # copy_kernel option is used.
self._need_copy_kernel = False
# setup tmpfs tmpdir when enabletmpfs is True
self.enabletmpfs = False
if createopts:
# Mapping table for variables that have different names.
optmap = {"pkgmgr" : "pkgmgr_name",
"outdir" : "destdir",
"arch" : "target_arch",
"local_pkgs_path" : "_local_pkgs_path",
"copy_kernel" : "_need_copy_kernel",
}
# update setting from createopts
for key in createopts.keys():
if key in optmap:
option = optmap[key]
else:
option = key
setattr(self, option, createopts[key])
self.destdir = os.path.abspath(os.path.expanduser(self.destdir))
if 'release' in createopts and createopts['release']:
self.name = createopts['release'] + '_' + self.name
if self.pack_to:
if '@NAME@' in self.pack_to:
self.pack_to = self.pack_to.replace('@NAME@', self.name)
(tar, ext) = os.path.splitext(self.pack_to)
if ext in (".gz", ".bz2") and tar.endswith(".tar"):
ext = ".tar" + ext
if ext not in misc.pack_formats:
self.pack_to += ".tar"
self._dep_checks = ["ls", "bash", "cp", "echo", "modprobe"]
# Output image file names
self.outimage = []
# A flag to generate checksum
self._genchecksum = False
self._alt_initrd_name = None
self._recording_pkgs = []
# available size in root fs, init to 0
self._root_fs_avail = 0
# Name of the disk image file that is created.
self._img_name = None
self.image_format = None
# Save qemu emulator file name in order to clean up it finally
self.qemu_emulator = None
        # No ks provided when called by the converter, so skip the dependency check
if self.ks:
# If we have btrfs partition we need to check necessary tools
for part in self.ks.handler.partition.partitions:
if part.fstype and part.fstype == "btrfs":
self._dep_checks.append("mkfs.btrfs")
break
if self.target_arch and self.target_arch.startswith("arm"):
for dep in self._dep_checks:
if dep == "extlinux":
self._dep_checks.remove(dep)
if not os.path.exists("/usr/bin/qemu-arm") or \
not misc.is_statically_linked("/usr/bin/qemu-arm"):
self._dep_checks.append("qemu-arm-static")
if os.path.exists("/proc/sys/vm/vdso_enabled"):
vdso_fh = open("/proc/sys/vm/vdso_enabled","r")
vdso_value = vdso_fh.read().strip()
vdso_fh.close()
if (int)(vdso_value) == 1:
msger.warning("vdso is enabled on your host, which might "
"cause problems with arm emulations.\n"
"\tYou can disable vdso with following command before "
"starting image build:\n"
"\techo 0 | sudo tee /proc/sys/vm/vdso_enabled")
# make sure the specified tmpdir and cachedir exist
if not os.path.exists(self.tmpdir):
os.makedirs(self.tmpdir)
if not os.path.exists(self.cachedir):
os.makedirs(self.cachedir)
#
# Properties
#
def __get_instroot(self):
if self.__builddir is None:
raise CreatorError("_instroot is not valid before calling mount()")
return self.__builddir + "/install_root"
_instroot = property(__get_instroot)
"""The location of the install root directory.
This is the directory into which the system is installed. Subclasses may
mount a filesystem image here or copy files to/from here.
Note, this directory does not exist before ImageCreator.mount() is called.
Note also, this is a read-only attribute.
"""
def __get_outdir(self):
if self.__builddir is None:
raise CreatorError("_outdir is not valid before calling mount()")
return self.__builddir + "/out"
_outdir = property(__get_outdir)
"""The staging location for the final image.
This is where subclasses should stage any files that are part of the final
image. ImageCreator.package() will copy any files found here into the
requested destination directory.
Note, this directory does not exist before ImageCreator.mount() is called.
Note also, this is a read-only attribute.
"""
#
# Hooks for subclasses
#
def _mount_instroot(self, base_on = None):
"""Mount or prepare the install root directory.
        This is the hook where subclasses may prepare the install root, e.g.
        by creating and loopback mounting a filesystem image onto _instroot.
There is no default implementation.
base_on -- this is the value passed to mount() and can be interpreted
as the subclass wishes; it might e.g. be the location of
a previously created ISO containing a system image.
"""
pass
def _unmount_instroot(self):
"""Undo anything performed in _mount_instroot().
This is the hook where subclasses must undo anything which was done
in _mount_instroot(). For example, if a filesystem image was mounted
onto _instroot, it should be unmounted here.
There is no default implementation.
"""
pass
def _create_bootconfig(self):
"""Configure the image so that it's bootable.
This is the hook where subclasses may prepare the image for booting by
e.g. creating an initramfs and bootloader configuration.
This hook is called while the install root is still mounted, after the
packages have been installed and the kickstart configuration has been
applied, but before the %post scripts have been executed.
There is no default implementation.
"""
pass
def _stage_final_image(self):
"""Stage the final system image in _outdir.
This is the hook where subclasses should place the image in _outdir
so that package() can copy it to the requested destination directory.
By default, this moves the install root into _outdir.
"""
shutil.move(self._instroot, self._outdir + "/" + self.name)
def get_installed_packages(self):
return self._pkgs_content.keys()
def _save_recording_pkgs(self, destdir):
"""Save the list or content of installed packages to file.
"""
pkgs = self._pkgs_content.keys()
pkgs.sort() # inplace op
if not os.path.exists(destdir):
os.makedirs(destdir)
content = None
if 'vcs' in self._recording_pkgs:
vcslst = ["%s %s" % (k, v) for (k, v) in self._pkgs_vcsinfo.items()]
content = '\n'.join(sorted(vcslst))
elif 'name' in self._recording_pkgs:
content = '\n'.join(pkgs)
if content:
namefile = os.path.join(destdir, self.name + '.packages')
f = open(namefile, "w")
f.write(content)
f.close()
            self.outimage.append(namefile)
# if 'content', save more details
if 'content' in self._recording_pkgs:
contfile = os.path.join(destdir, self.name + '.files')
f = open(contfile, "w")
for pkg in pkgs:
content = pkg + '\n'
pkgcont = self._pkgs_content[pkg]
content += ' '
content += '\n '.join(pkgcont)
content += '\n'
content += '\n'
f.write(content)
f.close()
self.outimage.append(contfile)
if 'license' in self._recording_pkgs:
licensefile = os.path.join(destdir, self.name + '.license')
f = open(licensefile, "w")
f.write('Summary:\n')
for license in reversed(sorted(self._pkgs_license, key=\
lambda license: len(self._pkgs_license[license]))):
f.write(" - %s: %s\n" \
% (license, len(self._pkgs_license[license])))
f.write('\nDetails:\n')
for license in reversed(sorted(self._pkgs_license, key=\
lambda license: len(self._pkgs_license[license]))):
f.write(" - %s:\n" % (license))
for pkg in sorted(self._pkgs_license[license]):
f.write(" - %s\n" % (pkg))
f.write('\n')
f.close()
self.outimage.append(licensefile)
def _get_required_packages(self):
"""Return a list of required packages.
This is the hook where subclasses may specify a set of packages which
it requires to be installed.
This returns an empty list by default.
Note, subclasses should usually chain up to the base class
implementation of this hook.
"""
return []
def _get_excluded_packages(self):
"""Return a list of excluded packages.
This is the hook where subclasses may specify a set of packages which
it requires _not_ to be installed.
This returns an empty list by default.
Note, subclasses should usually chain up to the base class
implementation of this hook.
"""
return []
def _get_local_packages(self):
"""Return a list of rpm path to be local installed.
This is the hook where subclasses may specify a set of rpms which
it requires to be installed locally.
This returns an empty list by default.
Note, subclasses should usually chain up to the base class
implementation of this hook.
"""
if self._local_pkgs_path:
if os.path.isdir(self._local_pkgs_path):
return glob.glob(
os.path.join(self._local_pkgs_path, '*.rpm'))
elif os.path.splitext(self._local_pkgs_path)[-1] == '.rpm':
return [self._local_pkgs_path]
return []
def _get_fstab(self):
"""Return the desired contents of /etc/fstab.
This is the hook where subclasses may specify the contents of
/etc/fstab by returning a string containing the desired contents.
A sensible default implementation is provided.
"""
s = "/dev/root / %s %s 0 0\n" \
% (self._fstype,
"defaults,noatime" if not self._fsopts else self._fsopts)
s += self._get_fstab_special()
return s
def _get_fstab_special(self):
s = "devpts /dev/pts devpts gid=5,mode=620 0 0\n"
s += "tmpfs /dev/shm tmpfs defaults 0 0\n"
s += "proc /proc proc defaults 0 0\n"
s += "sysfs /sys sysfs defaults 0 0\n"
return s
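    # A sketch of the fstab produced by _get_fstab, assuming a subclass
    # sets _fstype = "ext3" and leaves _fsopts unset:
    #   /dev/root / ext3 defaults,noatime 0 0
    #   devpts /dev/pts devpts gid=5,mode=620 0 0
    #   tmpfs /dev/shm tmpfs defaults 0 0
    #   proc /proc proc defaults 0 0
    #   sysfs /sys sysfs defaults 0 0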
def _set_part_env(self, pnum, prop, value):
""" This is a helper function which generates an environment variable
for a property "prop" with value "value" of a partition number "pnum".
The naming convention is:
* Variables start with INSTALLERFW_PART
* Then goes the partition number, the order is the same as
specified in the KS file
* Then goes the property name
"""
if value == None:
value = ""
else:
value = str(value)
name = self.installerfw_prefix + ("PART%d_" % pnum) + prop
return { name : value }
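    # For example, assuming a hypothetical first KS partition of 64 MB:
    #   self._set_part_env(0, "SIZE", 64)
    #   -> {"INSTALLERFW_PART0_SIZE": "64"}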
def _get_post_scripts_env(self, in_chroot):
"""Return an environment dict for %post scripts.
This is the hook where subclasses may specify some environment
variables for %post scripts by return a dict containing the desired
environment.
in_chroot -- whether this %post script is to be executed chroot()ed
into _instroot.
"""
env = {}
pnum = 0
for p in kickstart.get_partitions(self.ks):
env.update(self._set_part_env(pnum, "SIZE", p.size))
env.update(self._set_part_env(pnum, "MOUNTPOINT", p.mountpoint))
env.update(self._set_part_env(pnum, "FSTYPE", p.fstype))
env.update(self._set_part_env(pnum, "LABEL", p.label))
env.update(self._set_part_env(pnum, "FSOPTS", p.fsopts))
env.update(self._set_part_env(pnum, "BOOTFLAG", p.active))
env.update(self._set_part_env(pnum, "ALIGN", p.align))
env.update(self._set_part_env(pnum, "TYPE_ID", p.part_type))
env.update(self._set_part_env(pnum, "DEVNODE",
"/dev/%s%d" % (p.disk, pnum + 1)))
pnum += 1
        # Count of partitions
env[self.installerfw_prefix + "PART_COUNT"] = str(pnum)
# Partition table format
ptable_format = self.ks.handler.bootloader.ptable
env[self.installerfw_prefix + "PTABLE_FORMAT"] = ptable_format
        # The kernel boot parameters
kernel_opts = self.ks.handler.bootloader.appendLine
env[self.installerfw_prefix + "KERNEL_OPTS"] = kernel_opts
# Name of the distribution
env[self.installerfw_prefix + "DISTRO_NAME"] = self.distro_name
# Name of the image creation tool
env[self.installerfw_prefix + "INSTALLER_NAME"] = "wic"
# The real current location of the mounted file-systems
if in_chroot:
mount_prefix = "/"
else:
mount_prefix = self._instroot
env[self.installerfw_prefix + "MOUNT_PREFIX"] = mount_prefix
# These are historical variables which lack the common name prefix
if not in_chroot:
env["INSTALL_ROOT"] = self._instroot
env["IMG_NAME"] = self._name
return env
def __get_imgname(self):
return self.name
_name = property(__get_imgname)
"""The name of the image file.
"""
def _get_kernel_versions(self):
"""Return a dict detailing the available kernel types/versions.
This is the hook where subclasses may override what kernel types and
        versions should be available for e.g. creating the bootloader
configuration.
A dict should be returned mapping the available kernel types to a list
of the available versions for those kernels.
The default implementation uses rpm to iterate over everything
providing 'kernel', finds /boot/vmlinuz-* and returns the version
obtained from the vmlinuz filename. (This can differ from the kernel
RPM's n-v-r in the case of e.g. xen)
"""
def get_kernel_versions(instroot):
ret = {}
versions = set()
files = glob.glob(instroot + "/boot/vmlinuz-*")
for file in files:
version = os.path.basename(file)[8:]
if version is None:
continue
versions.add(version)
ret["kernel"] = list(versions)
return ret
def get_version(header):
version = None
for f in header['filenames']:
if f.startswith('/boot/vmlinuz-'):
version = f[14:]
return version
if self.ks is None:
return get_kernel_versions(self._instroot)
ts = rpm.TransactionSet(self._instroot)
ret = {}
for header in ts.dbMatch('provides', 'kernel'):
version = get_version(header)
if version is None:
continue
name = header['name']
if not name in ret:
ret[name] = [version]
elif not version in ret[name]:
ret[name].append(version)
return ret
#
# Helpers for subclasses
#
def _do_bindmounts(self):
"""Mount various system directories onto _instroot.
This method is called by mount(), but may also be used by subclasses
in order to re-mount the bindmounts after modifying the underlying
filesystem.
"""
for b in self.__bindmounts:
b.mount()
def _undo_bindmounts(self):
"""Unmount the bind-mounted system directories from _instroot.
This method is usually only called by unmount(), but may also be used
by subclasses in order to gain access to the filesystem obscured by
the bindmounts - e.g. in order to create device nodes on the image
filesystem.
"""
self.__bindmounts.reverse()
for b in self.__bindmounts:
b.unmount()
def _chroot(self):
"""Chroot into the install root.
This method may be used by subclasses when executing programs inside
the install root e.g.
subprocess.call(["/bin/ls"], preexec_fn = self.chroot)
"""
os.chroot(self._instroot)
os.chdir("/")
def _mkdtemp(self, prefix = "tmp-"):
"""Create a temporary directory.
This method may be used by subclasses to create a temporary directory
for use in building the final image - e.g. a subclass might create
a temporary directory in order to bundle a set of files into a package.
The subclass may delete this directory if it wishes, but it will be
automatically deleted by cleanup().
The absolute path to the temporary directory is returned.
Note, this method should only be called after mount() has been called.
prefix -- a prefix which should be used when creating the directory;
defaults to "tmp-".
"""
self.__ensure_builddir()
return tempfile.mkdtemp(dir = self.__builddir, prefix = prefix)
def _mkstemp(self, prefix = "tmp-"):
"""Create a temporary file.
This method may be used by subclasses to create a temporary file
for use in building the final image - e.g. a subclass might need
a temporary location to unpack a compressed file.
The subclass may delete this file if it wishes, but it will be
automatically deleted by cleanup().
        A tuple containing a file descriptor (as returned by os.open()) and
        the absolute path to the temporary file is returned.
Note, this method should only be called after mount() has been called.
prefix -- a prefix which should be used when creating the file;
defaults to "tmp-".
"""
self.__ensure_builddir()
return tempfile.mkstemp(dir = self.__builddir, prefix = prefix)
def _mktemp(self, prefix = "tmp-"):
"""Create a temporary file.
This method simply calls _mkstemp() and closes the returned file
descriptor.
The absolute path to the temporary file is returned.
Note, this method should only be called after mount() has been called.
prefix -- a prefix which should be used when creating the file;
defaults to "tmp-".
"""
(f, path) = self._mkstemp(prefix)
os.close(f)
return path
#
# Actual implementation
#
def __ensure_builddir(self):
        if self.__builddir is not None:
return
try:
self.workdir = os.path.join(self.tmpdir, "build")
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
self.__builddir = tempfile.mkdtemp(dir = self.workdir,
prefix = "imgcreate-")
except OSError, (err, msg):
raise CreatorError("Failed create build directory in %s: %s" %
(self.tmpdir, msg))
def get_cachedir(self, cachedir = None):
if self.cachedir:
return self.cachedir
self.__ensure_builddir()
if cachedir:
self.cachedir = cachedir
else:
self.cachedir = self.__builddir + "/wic-cache"
fs.makedirs(self.cachedir)
return self.cachedir
def __sanity_check(self):
"""Ensure that the config we've been given is sane."""
if not (kickstart.get_packages(self.ks) or
kickstart.get_groups(self.ks)):
raise CreatorError("No packages or groups specified")
kickstart.convert_method_to_repo(self.ks)
if not kickstart.get_repos(self.ks):
raise CreatorError("No repositories specified")
def __write_fstab(self):
fstab_contents = self._get_fstab()
if fstab_contents:
fstab = open(self._instroot + "/etc/fstab", "w")
fstab.write(fstab_contents)
fstab.close()
def __create_minimal_dev(self):
"""Create a minimal /dev so that we don't corrupt the host /dev"""
origumask = os.umask(0000)
devices = (('null', 1, 3, 0666),
('urandom',1, 9, 0666),
('random', 1, 8, 0666),
('full', 1, 7, 0666),
('ptmx', 5, 2, 0666),
('tty', 5, 0, 0666),
('zero', 1, 5, 0666))
links = (("/proc/self/fd", "/dev/fd"),
("/proc/self/fd/0", "/dev/stdin"),
("/proc/self/fd/1", "/dev/stdout"),
("/proc/self/fd/2", "/dev/stderr"))
for (node, major, minor, perm) in devices:
if not os.path.exists(self._instroot + "/dev/" + node):
os.mknod(self._instroot + "/dev/" + node,
perm | stat.S_IFCHR,
os.makedev(major,minor))
for (src, dest) in links:
if not os.path.exists(self._instroot + dest):
os.symlink(src, self._instroot + dest)
os.umask(origumask)
def __setup_tmpdir(self):
if not self.enabletmpfs:
return
runner.show('mount -t tmpfs -o size=4G tmpfs %s' % self.workdir)
def __clean_tmpdir(self):
if not self.enabletmpfs:
return
runner.show('umount -l %s' % self.workdir)
def mount(self, base_on = None, cachedir = None):
"""Setup the target filesystem in preparation for an install.
This function sets up the filesystem which the ImageCreator will
install into and configure. The ImageCreator class merely creates an
install root directory, bind mounts some system directories (e.g. /dev)
and writes out /etc/fstab. Other subclasses may also e.g. create a
sparse file, format it and loopback mount it to the install root.
base_on -- a previous install on which to base this install; defaults
to None, causing a new image to be created
cachedir -- a directory in which to store the Yum cache; defaults to
None, causing a new cache to be created; by setting this
to another directory, the same cache can be reused across
multiple installs.
"""
self.__setup_tmpdir()
self.__ensure_builddir()
self._mount_instroot(base_on)
def unmount(self):
"""Unmounts the target filesystem.
The ImageCreator class detaches the system from the install root, but
other subclasses may also detach the loopback mounted filesystem image
from the install root.
"""
self._unmount_instroot()
def cleanup(self):
"""Unmounts the target filesystem and deletes temporary files.
This method calls unmount() and then deletes any temporary files and
directories that were created on the host system while building the
image.
Note, make sure to call this method once finished with the creator
instance in order to ensure no stale files are left on the host e.g.:
creator = ImageCreator(ks, name)
try:
creator.create()
finally:
creator.cleanup()
"""
if not self.__builddir:
return
self.unmount()
shutil.rmtree(self.__builddir, ignore_errors = True)
self.__builddir = None
self.__clean_tmpdir()
def __is_excluded_pkg(self, pkg):
if pkg in self._excluded_pkgs:
self._excluded_pkgs.remove(pkg)
return True
for xpkg in self._excluded_pkgs:
if xpkg.endswith('*'):
if pkg.startswith(xpkg[:-1]):
return True
elif xpkg.startswith('*'):
if pkg.endswith(xpkg[1:]):
return True
return None
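    # Wildcard semantics sketch (package names are hypothetical): with
    # _excluded_pkgs = ["*-doc", "kernel*"], "foo-doc" matches the
    # leading-'*' branch and "kernel-devel" the trailing-'*' branch; an
    # exact match is also removed from the list before returning True.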
def __select_packages(self, pkg_manager):
skipped_pkgs = []
for pkg in self._required_pkgs:
e = pkg_manager.selectPackage(pkg)
if e:
if kickstart.ignore_missing(self.ks):
skipped_pkgs.append(pkg)
elif self.__is_excluded_pkg(pkg):
skipped_pkgs.append(pkg)
else:
raise CreatorError("Failed to find package '%s' : %s" %
(pkg, e))
for pkg in skipped_pkgs:
msger.warning("Skipping missing package '%s'" % (pkg,))
def __select_groups(self, pkg_manager):
skipped_groups = []
for group in self._required_groups:
e = pkg_manager.selectGroup(group.name, group.include)
if e:
if kickstart.ignore_missing(self.ks):
skipped_groups.append(group)
else:
raise CreatorError("Failed to find group '%s' : %s" %
(group.name, e))
for group in skipped_groups:
msger.warning("Skipping missing group '%s'" % (group.name,))
def __deselect_packages(self, pkg_manager):
for pkg in self._excluded_pkgs:
pkg_manager.deselectPackage(pkg)
def __localinst_packages(self, pkg_manager):
for rpm_path in self._get_local_packages():
pkg_manager.installLocal(rpm_path)
def __preinstall_packages(self, pkg_manager):
if not self.ks:
return
self._preinstall_pkgs = kickstart.get_pre_packages(self.ks)
for pkg in self._preinstall_pkgs:
pkg_manager.preInstall(pkg)
def __attachment_packages(self, pkg_manager):
if not self.ks:
return
self._attachment = []
for item in kickstart.get_attachment(self.ks):
if item.startswith('/'):
fpaths = os.path.join(self._instroot, item.lstrip('/'))
for fpath in glob.glob(fpaths):
self._attachment.append(fpath)
continue
filelist = pkg_manager.getFilelist(item)
if filelist:
# found rpm in rootfs
for pfile in pkg_manager.getFilelist(item):
fpath = os.path.join(self._instroot, pfile.lstrip('/'))
self._attachment.append(fpath)
continue
# try to retrieve rpm file
(url, proxies) = pkg_manager.package_url(item)
if not url:
msger.warning("Can't get url from repo for %s" % item)
continue
fpath = os.path.join(self.cachedir, os.path.basename(url))
if not os.path.exists(fpath):
# download pkgs
try:
fpath = grabber.myurlgrab(url, fpath, proxies, None)
except CreatorError:
raise
tmpdir = self._mkdtemp()
misc.extract_rpm(fpath, tmpdir)
for (root, dirs, files) in os.walk(tmpdir):
for fname in files:
fpath = os.path.join(root, fname)
self._attachment.append(fpath)
def install(self, repo_urls=None):
"""Install packages into the install root.
This function installs the packages listed in the supplied kickstart
into the install root. By default, the packages are installed from the
repository URLs specified in the kickstart.
repo_urls -- a dict which maps a repository name to a repository URL;
if supplied, this causes any repository URLs specified in
the kickstart to be overridden.
"""
# initialize pkg list to install
if self.ks:
self.__sanity_check()
self._required_pkgs = \
kickstart.get_packages(self.ks, self._get_required_packages())
self._excluded_pkgs = \
kickstart.get_excluded(self.ks, self._get_excluded_packages())
self._required_groups = kickstart.get_groups(self.ks)
else:
self._required_pkgs = None
self._excluded_pkgs = None
self._required_groups = None
pkg_manager = self.get_pkg_manager()
pkg_manager.setup()
if hasattr(self, 'install_pkgs') and self.install_pkgs:
if 'debuginfo' in self.install_pkgs:
pkg_manager.install_debuginfo = True
for repo in kickstart.get_repos(self.ks, repo_urls):
(name, baseurl, mirrorlist, inc, exc,
proxy, proxy_username, proxy_password, debuginfo,
source, gpgkey, disable, ssl_verify, nocache,
cost, priority) = repo
yr = pkg_manager.addRepository(name, baseurl, mirrorlist, proxy,
proxy_username, proxy_password, inc, exc, ssl_verify,
nocache, cost, priority)
if kickstart.exclude_docs(self.ks):
rpm.addMacro("_excludedocs", "1")
rpm.addMacro("_dbpath", "/var/lib/rpm")
rpm.addMacro("__file_context_path", "%{nil}")
if kickstart.inst_langs(self.ks) != None:
rpm.addMacro("_install_langs", kickstart.inst_langs(self.ks))
try:
self.__preinstall_packages(pkg_manager)
self.__select_packages(pkg_manager)
self.__select_groups(pkg_manager)
self.__deselect_packages(pkg_manager)
self.__localinst_packages(pkg_manager)
BOOT_SAFEGUARD = 256L * 1024 * 1024 # 256M
checksize = self._root_fs_avail
if checksize:
checksize -= BOOT_SAFEGUARD
if self.target_arch:
pkg_manager._add_prob_flags(rpm.RPMPROB_FILTER_IGNOREARCH)
pkg_manager.runInstall(checksize)
except CreatorError, e:
raise
except KeyboardInterrupt:
raise
else:
self._pkgs_content = pkg_manager.getAllContent()
self._pkgs_license = pkg_manager.getPkgsLicense()
self._pkgs_vcsinfo = pkg_manager.getVcsInfo()
self.__attachment_packages(pkg_manager)
finally:
pkg_manager.close()
# hook post install
self.postinstall()
# do some clean up to avoid lvm info leakage. this sucks.
for subdir in ("cache", "backup", "archive"):
lvmdir = self._instroot + "/etc/lvm/" + subdir
try:
for f in os.listdir(lvmdir):
os.unlink(lvmdir + "/" + f)
except:
pass
def postinstall(self):
self.copy_attachment()
def __run_post_scripts(self):
msger.info("Running scripts ...")
if os.path.exists(self._instroot + "/tmp"):
shutil.rmtree(self._instroot + "/tmp")
        os.mkdir(self._instroot + "/tmp", 0755)
for s in kickstart.get_post_scripts(self.ks):
(fd, path) = tempfile.mkstemp(prefix = "ks-script-",
dir = self._instroot + "/tmp")
s.script = s.script.replace("\r", "")
os.write(fd, s.script)
os.close(fd)
os.chmod(path, 0700)
env = self._get_post_scripts_env(s.inChroot)
if not s.inChroot:
preexec = None
script = path
else:
preexec = self._chroot
script = "/tmp/" + os.path.basename(path)
try:
try:
subprocess.call([s.interp, script],
preexec_fn = preexec,
env = env,
stdout = sys.stdout,
stderr = sys.stderr)
except OSError, (err, msg):
raise CreatorError("Failed to execute %%post script "
"with '%s' : %s" % (s.interp, msg))
finally:
os.unlink(path)
def __save_repo_keys(self, repodata):
if not repodata:
return None
gpgkeydir = "/etc/pki/rpm-gpg"
fs.makedirs(self._instroot + gpgkeydir)
for repo in repodata:
if repo["repokey"]:
repokey = gpgkeydir + "/RPM-GPG-KEY-%s" % repo["name"]
shutil.copy(repo["repokey"], self._instroot + repokey)
def configure(self, repodata = None):
"""Configure the system image according to the kickstart.
This method applies the (e.g. keyboard or network) configuration
specified in the kickstart and executes the kickstart %post scripts.
If necessary, it also prepares the image to be bootable by e.g.
creating an initrd and bootloader configuration.
"""
ksh = self.ks.handler
msger.info('Applying configurations ...')
try:
kickstart.LanguageConfig(self._instroot).apply(ksh.lang)
kickstart.KeyboardConfig(self._instroot).apply(ksh.keyboard)
kickstart.TimezoneConfig(self._instroot).apply(ksh.timezone)
#kickstart.AuthConfig(self._instroot).apply(ksh.authconfig)
kickstart.FirewallConfig(self._instroot).apply(ksh.firewall)
kickstart.RootPasswordConfig(self._instroot).apply(ksh.rootpw)
kickstart.UserConfig(self._instroot).apply(ksh.user)
kickstart.ServicesConfig(self._instroot).apply(ksh.services)
kickstart.XConfig(self._instroot).apply(ksh.xconfig)
kickstart.NetworkConfig(self._instroot).apply(ksh.network)
kickstart.RPMMacroConfig(self._instroot).apply(self.ks)
kickstart.DesktopConfig(self._instroot).apply(ksh.desktop)
self.__save_repo_keys(repodata)
kickstart.MoblinRepoConfig(self._instroot).apply(ksh.repo, repodata, self.repourl)
except:
msger.warning("Failed to apply configuration to image")
raise
self._create_bootconfig()
self.__run_post_scripts()
def launch_shell(self, launch):
"""Launch a shell in the install root.
This method launches a bash shell chroot()ed in the install root;
this can be useful for debugging.
"""
if launch:
msger.info("Launching shell. Exit to continue.")
subprocess.call(["/bin/bash"], preexec_fn = self._chroot)
def do_genchecksum(self, image_name):
if not self._genchecksum:
return
md5sum = misc.get_md5sum(image_name)
with open(image_name + ".md5sum", "w") as f:
f.write("%s %s" % (md5sum, os.path.basename(image_name)))
self.outimage.append(image_name+".md5sum")
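# Illustrative sketch (hypothetical values): for image_name "meego.raw",
# the generated "meego.raw.md5sum" would contain a single line like
#   d41d8cd98f00b204e9800998ecf8427e meego.raw
# i.e. the md5 hash followed by the image's base name.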
def package(self, destdir = "."):
"""Prepares the created image for final delivery.
In its simplest form, this method merely copies the install root to the
supplied destination directory; other subclasses may choose to package
the image by e.g. creating a bootable ISO containing the image and
bootloader configuration.
destdir -- the directory into which the final image should be moved;
this defaults to the current directory.
"""
self._stage_final_image()
if not os.path.exists(destdir):
fs.makedirs(destdir)
if self._recording_pkgs:
self._save_recording_pkgs(destdir)
# For image formats with two or more image files, it is better
# to put them under a directory
if self.image_format in ("raw", "vmdk", "vdi", "nand", "mrstnand"):
destdir = os.path.join(destdir, "%s-%s" \
% (self.name, self.image_format))
msger.debug("creating destination dir: %s" % destdir)
fs.makedirs(destdir)
# Ensure all data is flushed to _outdir
runner.quiet('sync')
misc.check_space_pre_cp(self._outdir, destdir)
for f in os.listdir(self._outdir):
shutil.move(os.path.join(self._outdir, f),
os.path.join(destdir, f))
self.outimage.append(os.path.join(destdir, f))
self.do_genchecksum(os.path.join(destdir, f))
def print_outimage_info(self):
msg = "The new image can be found here:\n"
self.outimage.sort()
for file in self.outimage:
msg += ' %s\n' % os.path.abspath(file)
msger.info(msg)
def check_depend_tools(self):
for tool in self._dep_checks:
fs.find_binary_path(tool)
def package_output(self, image_format, destdir = ".", package="none"):
if not package or package == "none":
return
destdir = os.path.abspath(os.path.expanduser(destdir))
(pkg, comp) = os.path.splitext(package)
if comp:
comp=comp.lstrip(".")
if pkg == "tar":
if comp:
dst = "%s/%s-%s.tar.%s" %\
(destdir, self.name, image_format, comp)
else:
dst = "%s/%s-%s.tar" %\
(destdir, self.name, image_format)
msger.info("creating %s" % dst)
tar = tarfile.open(dst, "w:" + comp)
for file in self.outimage:
msger.info("adding %s to %s" % (file, dst))
tar.add(file,
arcname=os.path.join("%s-%s" \
% (self.name, image_format),
os.path.basename(file)))
if os.path.isdir(file):
shutil.rmtree(file, ignore_errors = True)
else:
os.remove(file)
tar.close()
'''All the files in outimage have been packaged into the tar.* file'''
self.outimage = [dst]
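# A hedged usage sketch (values are illustrative, not from this code base):
#   creator.package_output("raw", destdir="/tmp/out", package="tar.gz")
# would bundle everything in self.outimage into
#   /tmp/out/<name>-raw.tar.gz
# and leave that single archive as the only entry in self.outimage.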
def release_output(self, config, destdir, release):
""" Create release directory and files
"""
def _rpath(fn):
""" release path """
return os.path.join(destdir, fn)
outimages = self.outimage
# new ks
new_kspath = _rpath(self.name+'.ks')
with open(config) as fr:
with open(new_kspath, "w") as wf:
# When building a release we want to make sure the .ks
# file generates the same build even when --release is not used.
wf.write(fr.read().replace("@BUILD_ID@", release))
outimages.append(new_kspath)
# save log file, logfile is only available in creator attrs
if hasattr(self, 'logfile') and not self.logfile:
log_path = _rpath(self.name + ".log")
# touch the log file, else outimages will filter it out
with open(log_path, 'w') as wf:
wf.write('')
msger.set_logfile(log_path)
outimages.append(_rpath(self.name + ".log"))
# rename iso and usbimg
for f in os.listdir(destdir):
if f.endswith(".iso"):
newf = f[:-4] + '.img'
elif f.endswith(".usbimg"):
newf = f[:-7] + '.img'
else:
continue
os.rename(_rpath(f), _rpath(newf))
outimages.append(_rpath(newf))
# generate MD5SUMS
with open(_rpath("MD5SUMS"), "w") as wf:
for f in os.listdir(destdir):
if f == "MD5SUMS":
continue
if os.path.isdir(os.path.join(destdir, f)):
continue
md5sum = misc.get_md5sum(_rpath(f))
# The sum and the file path must be separated by a space plus the
# binary-mode marker '*' to match md5sum's output syntax.
# This way md5sum -c MD5SUMS can also be used by users
wf.write("%s *%s\n" % (md5sum, f))
outimages.append("%s/MD5SUMS" % destdir)
# Filter out nonexistent files
for fp in outimages[:]:
if not os.path.exists("%s" % fp):
outimages.remove(fp)
def copy_kernel(self):
""" Copy kernel files to the outimage directory.
NOTE: This needs to be called before unmounting the instroot.
"""
if not self._need_copy_kernel:
return
if not os.path.exists(self.destdir):
os.makedirs(self.destdir)
for kernel in glob.glob("%s/boot/vmlinuz-*" % self._instroot):
kernelfilename = "%s/%s-%s" % (self.destdir,
self.name,
os.path.basename(kernel))
msger.info('copy kernel file %s as %s' % (os.path.basename(kernel),
kernelfilename))
shutil.copy(kernel, kernelfilename)
self.outimage.append(kernelfilename)
def copy_attachment(self):
""" Subclass implement it to handle attachment files
NOTE: This needs to be called before unmounting the instroot.
"""
pass
def get_pkg_manager(self):
return self.pkgmgr(target_arch = self.target_arch,
instroot = self._instroot,
cachedir = self.cachedir)
########NEW FILE########
__FILENAME__ = direct
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This implements the 'direct' image creator class for 'wic', based
# loosely on the raw image creator from 'mic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import os
import stat
import shutil
from mic import kickstart, msger
from mic.utils import fs_related, runner, misc
from mic.utils.partitionedfs import PartitionedMount
from mic.utils.errors import CreatorError, MountError
from mic.imager.baseimager import BaseImageCreator
from mic.utils.oe.misc import *
from mic.plugin import pluginmgr
disk_methods = {
"do_install_disk":None,
}
class DirectImageCreator(BaseImageCreator):
"""
Installs a system into a file containing a partitioned disk image.
DirectImageCreator is an advanced ImageCreator subclass; an image
file is formatted with a partition table, each partition created
from a rootfs or other OpenEmbedded build artifact and dd'ed into
the virtual disk. The disk image can subsequently be dd'ed onto
media and used on actual hardware.
"""
def __init__(self, oe_builddir, image_output_dir, rootfs_dir, bootimg_dir,
kernel_dir, native_sysroot, hdddir, staging_data_dir,
creatoropts=None, pkgmgr=None, compress_image=None,
generate_bmap=None, fstab_entry="uuid"):
"""
Initialize a DirectImageCreator instance.
This method takes the same arguments as ImageCreator.__init__()
"""
BaseImageCreator.__init__(self, creatoropts, pkgmgr)
self.__instimage = None
self.__imgdir = None
self.__disks = {}
self.__disk_format = "direct"
self._disk_names = []
self._ptable_format = self.ks.handler.bootloader.ptable
self.use_uuid = fstab_entry == "uuid"
self.compress_image = compress_image
self.bmap_needed = generate_bmap
self.oe_builddir = oe_builddir
if image_output_dir:
self.tmpdir = image_output_dir
self.cachedir = "%s/cache" % image_output_dir
self.rootfs_dir = rootfs_dir
self.bootimg_dir = bootimg_dir
self.kernel_dir = kernel_dir
self.native_sysroot = native_sysroot
self.hdddir = hdddir
self.staging_data_dir = staging_data_dir
def __write_fstab(self, image_rootfs):
"""overriden to generate fstab (temporarily) in rootfs. This
is called from mount_instroot, make sure it doesn't get called
from BaseImage.mount()"""
if image_rootfs is None:
return None
fstab = image_rootfs + "/etc/fstab"
if not os.path.isfile(fstab):
return None
parts = self._get_parts()
self._save_fstab(fstab)
fstab_lines = self._get_fstab(fstab, parts)
self._update_fstab(fstab_lines, parts)
self._write_fstab(fstab, fstab_lines)
return fstab
def _update_fstab(self, fstab_lines, parts):
"""Assume partition order same as in wks"""
for num, p in enumerate(parts, 1):
if not p.mountpoint or p.mountpoint == "/" or p.mountpoint == "/boot":
continue
if self._ptable_format == 'msdos' and num > 3:
device_name = "/dev/" + p.disk + str(num + 1)
else:
device_name = "/dev/" + p.disk + str(num)
fstab_entry = device_name + "\t" + p.mountpoint + "\t" + p.fstype + "\tdefaults\t0\t0\n"
fstab_lines.append(fstab_entry)
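# Illustrative example (assumed values): with ptable_format "msdos" and a
# 4th partition on disk "sda" mounted at /data with fstype ext3, the
# appended entry is
#   /dev/sda5   /data   ext3    defaults    0   0
# (partition 4 is skipped because it is reserved for the extended partition).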
def _write_fstab(self, fstab, fstab_lines):
fstab = open(fstab, "w")
for line in fstab_lines:
fstab.write(line)
fstab.close()
def _save_fstab(self, fstab):
"""Save the current fstab in rootfs"""
shutil.copyfile(fstab, fstab + ".orig")
def _restore_fstab(self, fstab):
"""Restore the saved fstab in rootfs"""
if fstab is None:
return
shutil.move(fstab + ".orig", fstab)
def _get_fstab(self, fstab, parts):
"""Return the desired contents of /etc/fstab."""
f = open(fstab, "r")
fstab_contents = f.readlines()
f.close()
return fstab_contents
def set_bootimg_dir(self, bootimg_dir):
"""
Accessor for bootimg_dir, the actual location used for the source
of the bootimg. Should be set by source plugins (only if they
change the default bootimg source) so the correct info gets
displayed for print_outimage_info().
"""
self.bootimg_dir = bootimg_dir
def _get_parts(self):
if not self.ks:
raise CreatorError("Failed to get partition info, "
"please check your kickstart setting.")
# Set a default partition if none is given
if not self.ks.handler.partition.partitions:
partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
args = partstr.split()
pd = self.ks.handler.partition.parse(args[1:])
if pd not in self.ks.handler.partition.partitions:
self.ks.handler.partition.partitions.append(pd)
# partitions list from kickstart file
return kickstart.get_partitions(self.ks)
def get_disk_names(self):
""" Returns a list of physical target disk names (e.g., 'sdb') which
will be created. """
if self._disk_names:
return self._disk_names
#get partition info from ks handler
parts = self._get_parts()
for i in range(len(parts)):
if parts[i].disk:
disk_name = parts[i].disk
else:
raise CreatorError("Failed to create disks, no --ondisk "
"specified in partition line of ks file")
if parts[i].mountpoint and not parts[i].fstype:
raise CreatorError("Failed to create disks, no --fstype "
"specified for partition with mountpoint "
"'%s' in the ks file")
self._disk_names.append(disk_name)
return self._disk_names
def _full_name(self, name, extension):
""" Construct full file name for a file we generate. """
return "%s-%s.%s" % (self.name, name, extension)
def _full_path(self, path, name, extension):
""" Construct full file path to a file we generate. """
return os.path.join(path, self._full_name(name, extension))
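# For example (illustrative values): _full_path("/tmp", "sda", "direct")
# returns "/tmp/<self.name>-sda.direct".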
def get_default_source_plugin(self):
"""
The default source plugin i.e. the plugin that's consulted for
overall image generation tasks outside of any particular
partition. For convenience, we just hang it off the
bootloader handler since it's the one non-partition object in
any setup. By default the default plugin is set to the same
plugin as the /boot partition; since we hang it off the
bootloader object, the default can be explicitly set using the
--source bootloader param.
"""
return self.ks.handler.bootloader.source
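# A hedged .wks sketch (hypothetical content) that sets the default source
# plugin explicitly through the bootloader line:
#   bootloader --timeout=0 --append="rootwait" --source bootimg-pcbios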
#
# Actual implementation
#
def _mount_instroot(self, base_on = None):
"""
For 'wic', we already have our build artifacts and don't want
to loop mount anything to install into, we just create
filesystems from the artifacts directly and combine them into
a partitioned image.
We still want to reuse as much of the basic mic machinery
though; despite the fact that we don't actually do loop or any
other kind of mounting we still want to do many of the same
things to prepare images, so we basically just adapt to the
basic framework and reinterpret what 'mounting' means in our
context.
_instroot would normally be something like
/var/tmp/wic/build/imgcreate-s_9AKQ/install_root, for
installing packages, etc. We don't currently need to do that,
so we simplify life by just using /var/tmp/wic/build as our
workdir.
"""
parts = self._get_parts()
self.__instimage = PartitionedMount(self._instroot)
for p in parts:
# as a convenience, set source to the boot partition source
# instead of forcing it to be set via bootloader --source
if not self.ks.handler.bootloader.source and p.mountpoint == "/boot":
self.ks.handler.bootloader.source = p.source
for p in parts:
# need to create the filesystems in order to get their
# sizes before we can add them and do the layout.
# PartitionedMount.mount() actually calls __format_disks()
# to create the disk images and carve out the partitions,
# then self.install() calls PartitionedMount.install()
# which calls __install_partition() for each partition
# to dd the fs into the partitions. It would be nice to
# be able to use e.g. ExtDiskMount etc to create the
# filesystems, since that's where existing e.g. mkfs code
# is, but those are only created after __format_disks()
# which needs the partition sizes, so they must be created
# before it's called. Well, the existing setup is geared
# to installing packages into mounted filesystems - maybe
# when/if we need to actually do package selection we
# should modify things to use those objects, but for now
# we can avoid that.
p.prepare(self, self.workdir, self.oe_builddir, self.rootfs_dir,
self.bootimg_dir, self.kernel_dir, self.native_sysroot)
fstab = self.__write_fstab(p.get_rootfs())
self._restore_fstab(fstab)
self.__instimage.add_partition(int(p.size),
p.disk,
p.mountpoint,
p.source_file,
p.fstype,
p.label,
fsopts = p.fsopts,
boot = p.active,
align = p.align,
part_type = p.part_type)
self.__instimage.layout_partitions(self._ptable_format)
self.__imgdir = self.workdir
for disk_name, disk in self.__instimage.disks.items():
full_path = self._full_path(self.__imgdir, disk_name, "direct")
msger.debug("Adding disk %s as %s with size %s bytes" \
% (disk_name, full_path, disk['min_size']))
disk_obj = fs_related.DiskImage(full_path, disk['min_size'])
self.__disks[disk_name] = disk_obj
self.__instimage.add_disk(disk_name, disk_obj)
self.__instimage.mount()
def install(self, repo_urls=None):
"""
Install fs images into partitions
"""
for disk_name, disk in self.__instimage.disks.items():
full_path = self._full_path(self.__imgdir, disk_name, "direct")
msger.debug("Installing disk %s as %s with size %s bytes" \
% (disk_name, full_path, disk['min_size']))
self.__instimage.install(full_path)
def configure(self, repodata = None):
"""
Configure the system image according to kickstart.
For now, it just prepares the image to be bootable by e.g.
creating and installing a bootloader configuration.
"""
source_plugin = self.get_default_source_plugin()
if source_plugin:
self._source_methods = pluginmgr.get_source_plugin_methods(source_plugin, disk_methods)
for disk_name, disk in self.__instimage.disks.items():
self._source_methods["do_install_disk"](disk, disk_name, self,
self.workdir,
self.oe_builddir,
self.bootimg_dir,
self.kernel_dir,
self.native_sysroot)
def print_outimage_info(self):
"""
Print the image(s) and artifacts used, for the user.
"""
msg = "The new image(s) can be found here:\n"
parts = self._get_parts()
for disk_name, disk in self.__instimage.disks.items():
full_path = self._full_path(self.__imgdir, disk_name, "direct")
msg += ' %s\n\n' % full_path
msg += 'The following build artifacts were used to create the image(s):\n'
for p in parts:
if p.get_rootfs() is None:
continue
if p.mountpoint == '/':
str = ':'
else:
str = '["%s"]:' % p.label
msg += ' ROOTFS_DIR%s%s\n' % (str.ljust(20), p.get_rootfs())
msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
msg += ' KERNEL_DIR: %s\n' % self.kernel_dir
msg += ' NATIVE_SYSROOT: %s\n' % self.native_sysroot
msger.info(msg)
def _get_boot_config(self):
"""
Return the rootdev/root_part_uuid (if specified by --part-type).
Assume the partition order is the same as in the .wks file.
"""
rootdev = None
root_part_uuid = None
parts = self._get_parts()
for num, p in enumerate(parts, 1):
if p.mountpoint == "/":
part = ''
if p.disk.startswith('mmcblk'):
part = 'p'
if self._ptable_format == 'msdos' and num > 3:
rootdev = "/dev/%s%s%-d" % (p.disk, part, num + 1)
else:
rootdev = "/dev/%s%s%-d" % (p.disk, part, num)
root_part_uuid = p.part_type
return (rootdev, root_part_uuid)
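# Illustrative outcomes (assumed layouts):
#   root on the 2nd partition of "sda"       -> rootdev "/dev/sda2"
#   root on the 2nd partition of "mmcblk0"   -> rootdev "/dev/mmcblk0p2"
#   root on the 4th msdos partition of "sda" -> rootdev "/dev/sda5"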
def _unmount_instroot(self):
if not self.__instimage is None:
try:
self.__instimage.cleanup()
except MountError, err:
msger.warning("%s" % err)
########NEW FILE########
__FILENAME__ = fs
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
from mic import msger
from mic.utils import runner, misc
from mic.utils.errors import CreatorError
from mic.utils.fs_related import find_binary_path
from mic.imager.baseimager import BaseImageCreator
class FsImageCreator(BaseImageCreator):
def __init__(self, cfgmgr = None, pkgmgr = None):
self.zips = {
"tar.bz2" : ""
}
BaseImageCreator.__init__(self, cfgmgr, pkgmgr)
self._fstype = None
self._fsopts = None
self._include_src = False
def package(self, destdir = "."):
ignores = ["/dev/fd",
"/dev/stdin",
"/dev/stdout",
"/dev/stderr",
"/etc/mtab"]
if not os.path.exists(destdir):
os.makedirs(destdir)
if self._recording_pkgs:
self._save_recording_pkgs(destdir)
if not self.pack_to:
fsdir = os.path.join(destdir, self.name)
misc.check_space_pre_cp(self._instroot, destdir)
msger.info("Copying %s to %s ..." % (self._instroot, fsdir))
runner.show(['cp', "-af", self._instroot, fsdir])
for exclude in ignores:
if os.path.exists(fsdir + exclude):
os.unlink(fsdir + exclude)
self.outimage.append(fsdir)
else:
(tar, comp) = os.path.splitext(self.pack_to)
try:
tarcreat = {'.tar': '-cf',
'.gz': '-czf',
'.bz2': '-cjf',
'.tgz': '-czf',
'.tbz': '-cjf'}[comp]
except KeyError:
raise CreatorError("Unsupported comression for this image type:"
" '%s', try '.tar', '.tar.gz', etc" % comp)
dst = os.path.join(destdir, self.pack_to)
msger.info("Pack rootfs to %s. Please wait..." % dst)
tar = find_binary_path('tar')
tar_cmdline = [tar, "--numeric-owner",
"--preserve-permissions",
"--preserve-order",
"--one-file-system",
"--directory",
self._instroot]
for ignore_entry in ignores:
if ignore_entry.startswith('/'):
ignore_entry = ignore_entry[1:]
tar_cmdline.append("--exclude=%s" % (ignore_entry))
tar_cmdline.extend([tarcreat, dst, "."])
rc = runner.show(tar_cmdline)
if rc:
raise CreatorError("Failed compress image with tar.bz2. "
"Cmdline: %s" % (" ".join(tar_cmdline)))
self.outimage.append(dst)
########NEW FILE########
__FILENAME__ = livecd
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
import glob
import shutil
from mic import kickstart, msger
from mic.utils import fs_related, rpmmisc, runner, misc
from mic.utils.errors import CreatorError
from mic.imager.loop import LoopImageCreator
class LiveImageCreatorBase(LoopImageCreator):
"""A base class for LiveCD image creators.
This class serves as a base class for the architecture-specific LiveCD
image creator subclass, LiveImageCreator.
LiveImageCreator creates a bootable ISO containing the system image,
bootloader, bootloader configuration, kernel and initramfs.
"""
def __init__(self, creatoropts = None, pkgmgr = None):
"""Initialise a LiveImageCreator instance.
This method takes the same arguments as ImageCreator.__init__().
"""
LoopImageCreator.__init__(self, creatoropts, pkgmgr)
#Controls whether to use squashfs to compress the image.
self.skip_compression = False
#Controls whether an image minimizing snapshot should be created.
#
#This snapshot can be used when copying the system image from the ISO in
#order to minimize the amount of data that needs to be copied; simply,
#it makes it possible to create a version of the image's filesystem with
#no spare space.
self.skip_minimize = False
#A flag which indicates whether this instance acts as a convertor; defaults to False.
self.actasconvertor = False
#The bootloader timeout from kickstart.
if self.ks:
self._timeout = kickstart.get_timeout(self.ks, 10)
else:
self._timeout = 10
#The default kernel type from kickstart.
if self.ks:
self._default_kernel = kickstart.get_default_kernel(self.ks,
"kernel")
else:
self._default_kernel = None
if self.ks:
parts = kickstart.get_partitions(self.ks)
if len(parts) > 1:
raise CreatorError("Can't support multi partitions in ks file "
"for this image type")
# FIXME: rename rootfs img to self.name,
# else files can't be found when creating the iso
self._instloops[0]['name'] = self.name + ".img"
self.__isodir = None
self.__modules = ["=ata",
"sym53c8xx",
"aic7xxx",
"=usb",
"=firewire",
"=mmc",
"=pcmcia",
"mptsas"]
if self.ks:
self.__modules.extend(kickstart.get_modules(self.ks))
self._dep_checks.extend(["isohybrid",
"unsquashfs",
"mksquashfs",
"dd",
"genisoimage"])
#
# Hooks for subclasses
#
def _configure_bootloader(self, isodir):
"""Create the architecture specific booloader configuration.
This is the hook where subclasses must create the booloader
configuration in order to allow a bootable ISO to be built.
isodir -- the directory where the contents of the ISO are to
be staged
"""
raise CreatorError("Bootloader configuration is arch-specific, "
"but not implemented for this arch!")
def _get_menu_options(self):
"""Return a menu options string for syslinux configuration.
"""
if self.ks is None:
return "liveinst autoinst"
r = kickstart.get_menu_args(self.ks)
return r
def _get_kernel_options(self):
"""Return a kernel options string for bootloader configuration.
This is the hook where subclasses may specify a set of kernel
options which should be included in the image's bootloader
configuration.
A sensible default implementation is provided.
"""
if self.ks is None:
r = "ro rd.live.image"
else:
r = kickstart.get_kernel_args(self.ks)
return r
def _get_mkisofs_options(self, isodir):
"""Return the architecture specific mkisosfs options.
This is the hook where subclasses may specify additional arguments
to mkisofs, e.g. to enable a bootable ISO to be built.
By default, an empty list is returned.
"""
return []
#
# Helpers for subclasses
#
def _has_checkisomd5(self):
"""Check whether checkisomd5 is available in the install root."""
def _exists(path):
return os.path.exists(self._instroot + path)
if _exists("/usr/bin/checkisomd5") and os.path.exists("/usr/bin/implantisomd5"):
return True
return False
def __restore_file(self,path):
try:
os.unlink(path)
except:
pass
if os.path.exists(path + '.rpmnew'):
os.rename(path + '.rpmnew', path)
def _mount_instroot(self, base_on = None):
LoopImageCreator._mount_instroot(self, base_on)
self.__write_initrd_conf(self._instroot + "/etc/sysconfig/mkinitrd")
self.__write_dracut_conf(self._instroot + "/etc/dracut.conf.d/02livecd.conf")
def _unmount_instroot(self):
self.__restore_file(self._instroot + "/etc/sysconfig/mkinitrd")
self.__restore_file(self._instroot + "/etc/dracut.conf.d/02livecd.conf")
LoopImageCreator._unmount_instroot(self)
def __ensure_isodir(self):
if self.__isodir is None:
self.__isodir = self._mkdtemp("iso-")
return self.__isodir
def _get_isodir(self):
return self.__ensure_isodir()
def _set_isodir(self, isodir = None):
self.__isodir = isodir
def _create_bootconfig(self):
"""Configure the image so that it's bootable."""
self._configure_bootloader(self.__ensure_isodir())
def _get_post_scripts_env(self, in_chroot):
env = LoopImageCreator._get_post_scripts_env(self, in_chroot)
if not in_chroot:
env["LIVE_ROOT"] = self.__ensure_isodir()
return env
def __write_dracut_conf(self, path):
if not os.path.exists(os.path.dirname(path)):
fs_related.makedirs(os.path.dirname(path))
f = open(path, "a")
f.write('add_dracutmodules+=" dmsquash-live pollcdrom "')
f.close()
def __write_initrd_conf(self, path):
content = ""
if not os.path.exists(os.path.dirname(path)):
fs_related.makedirs(os.path.dirname(path))
f = open(path, "w")
content += 'LIVEOS="yes"\n'
content += 'PROBE="no"\n'
content += 'MODULES+="squashfs ext3 ext2 vfat msdos "\n'
content += 'MODULES+="sr_mod sd_mod ide-cd cdrom "\n'
for module in self.__modules:
if module == "=usb":
content += 'MODULES+="ehci_hcd uhci_hcd ohci_hcd "\n'
content += 'MODULES+="usb_storage usbhid "\n'
elif module == "=firewire":
content += 'MODULES+="firewire-sbp2 firewire-ohci "\n'
content += 'MODULES+="sbp2 ohci1394 ieee1394 "\n'
elif module == "=mmc":
content += 'MODULES+="mmc_block sdhci sdhci-pci "\n'
elif module == "=pcmcia":
content += 'MODULES+="pata_pcmcia "\n'
else:
content += 'MODULES+="' + module + ' "\n'
f.write(content)
f.close()
def __create_iso(self, isodir):
iso = self._outdir + "/" + self.name + ".iso"
genisoimage = fs_related.find_binary_path("genisoimage")
args = [genisoimage,
"-J", "-r",
"-hide-rr-moved", "-hide-joliet-trans-tbl",
"-V", self.fslabel,
"-o", iso]
args.extend(self._get_mkisofs_options(isodir))
args.append(isodir)
if runner.show(args) != 0:
raise CreatorError("ISO creation failed!")
""" It should be ok still even if you haven't isohybrid """
isohybrid = None
try:
isohybrid = fs_related.find_binary_path("isohybrid")
except:
pass
if isohybrid:
args = [isohybrid, "-partok", iso ]
if runner.show(args) != 0:
raise CreatorError("Hybrid ISO creation failed!")
self.__implant_md5sum(iso)
def __implant_md5sum(self, iso):
"""Implant an isomd5sum."""
if os.path.exists("/usr/bin/implantisomd5"):
implantisomd5 = "/usr/bin/implantisomd5"
else:
msger.warning("isomd5sum not installed; not setting up mediacheck")
implantisomd5 = ""
return
runner.show([implantisomd5, iso])
def _stage_final_image(self):
try:
fs_related.makedirs(self.__ensure_isodir() + "/LiveOS")
minimal_size = self._resparse()
if not self.skip_minimize:
fs_related.create_image_minimizer(self.__isodir + \
"/LiveOS/osmin.img",
self._image,
minimal_size)
if self.skip_compression:
shutil.move(self._image, self.__isodir + "/LiveOS/ext3fs.img")
else:
fs_related.makedirs(os.path.join(
os.path.dirname(self._image),
"LiveOS"))
shutil.move(self._image,
os.path.join(os.path.dirname(self._image),
"LiveOS", "ext3fs.img"))
fs_related.mksquashfs(os.path.dirname(self._image),
self.__isodir + "/LiveOS/squashfs.img")
self.__create_iso(self.__isodir)
if self.pack_to:
isoimg = os.path.join(self._outdir, self.name + ".iso")
packimg = os.path.join(self._outdir, self.pack_to)
misc.packing(packimg, isoimg)
os.unlink(isoimg)
finally:
shutil.rmtree(self.__isodir, ignore_errors = True)
self.__isodir = None
class x86LiveImageCreator(LiveImageCreatorBase):
"""ImageCreator for x86 machines"""
def _get_mkisofs_options(self, isodir):
return [ "-b", "isolinux/isolinux.bin",
"-c", "isolinux/boot.cat",
"-no-emul-boot", "-boot-info-table",
"-boot-load-size", "4" ]
def _get_required_packages(self):
return ["syslinux", "syslinux-extlinux"] + \
LiveImageCreatorBase._get_required_packages(self)
def _get_isolinux_stanzas(self, isodir):
return ""
def __find_syslinux_menu(self):
for menu in ["vesamenu.c32", "menu.c32"]:
if os.path.isfile(self._instroot + "/usr/share/syslinux/" + menu):
return menu
raise CreatorError("syslinux not installed : "
"no suitable /usr/share/syslinux/*menu.c32 found")
def __find_syslinux_mboot(self):
#
# We only need the mboot module if we have any xen hypervisors
#
if not glob.glob(self._instroot + "/boot/xen.gz*"):
return None
return "mboot.c32"
def __copy_syslinux_files(self, isodir, menu, mboot = None):
files = ["isolinux.bin", menu]
if mboot:
files += [mboot]
for f in files:
path = self._instroot + "/usr/share/syslinux/" + f
if not os.path.isfile(path):
raise CreatorError("syslinux not installed : "
"%s not found" % path)
shutil.copy(path, isodir + "/isolinux/")
def __copy_syslinux_background(self, isodest):
background_path = self._instroot + \
"/usr/share/branding/default/syslinux/syslinux-vesa-splash.jpg"
if not os.path.exists(background_path):
return False
shutil.copyfile(background_path, isodest)
return True
def __copy_kernel_and_initramfs(self, isodir, version, index):
bootdir = self._instroot + "/boot"
isDracut = False
if self._alt_initrd_name:
src_initrd_path = os.path.join(bootdir, self._alt_initrd_name)
else:
if os.path.exists(bootdir + "/initramfs-" + version + ".img"):
src_initrd_path = os.path.join(bootdir, "initramfs-" +version+ ".img")
isDracut = True
else:
src_initrd_path = os.path.join(bootdir, "initrd-" +version+ ".img")
try:
msger.debug("copy %s to %s" % (bootdir + "/vmlinuz-" + version, isodir + "/isolinux/vmlinuz" + index))
shutil.copyfile(bootdir + "/vmlinuz-" + version,
isodir + "/isolinux/vmlinuz" + index)
msger.debug("copy %s to %s" % (src_initrd_path, isodir + "/isolinux/initrd" + index + ".img"))
shutil.copyfile(src_initrd_path,
isodir + "/isolinux/initrd" + index + ".img")
except:
raise CreatorError("Unable to copy valid kernels or initrds, "
"please check the repo.")
is_xen = False
if os.path.exists(bootdir + "/xen.gz-" + version[:-3]):
shutil.copyfile(bootdir + "/xen.gz-" + version[:-3],
isodir + "/isolinux/xen" + index + ".gz")
is_xen = True
return (is_xen,isDracut)
def __is_default_kernel(self, kernel, kernels):
if len(kernels) == 1:
return True
if kernel == self._default_kernel:
return True
if kernel.startswith("kernel-") and kernel[7:] == self._default_kernel:
return True
return False
def __get_basic_syslinux_config(self, **args):
return """
default %(menu)s
timeout %(timeout)d
%(background)s
menu title Welcome to %(distroname)s!
menu color border 0 #ffffffff #00000000
menu color sel 7 #ff000000 #ffffffff
menu color title 0 #ffffffff #00000000
menu color tabmsg 0 #ffffffff #00000000
menu color unsel 0 #ffffffff #00000000
menu color hotsel 0 #ff000000 #ffffffff
menu color hotkey 7 #ffffffff #ff000000
menu color timeout_msg 0 #ffffffff #00000000
menu color timeout 0 #ffffffff #00000000
menu color cmdline 0 #ffffffff #00000000
menu hidden
menu clear
""" % args
def __get_image_stanza(self, is_xen, isDracut, **args):
if isDracut:
args["rootlabel"] = "live:CDLABEL=%(fslabel)s" % args
else:
args["rootlabel"] = "CDLABEL=%(fslabel)s" % args
if not is_xen:
template = """label %(short)s
menu label %(long)s
kernel vmlinuz%(index)s
append initrd=initrd%(index)s.img root=%(rootlabel)s rootfstype=iso9660 %(liveargs)s %(extra)s
"""
else:
template = """label %(short)s
menu label %(long)s
kernel mboot.c32
append xen%(index)s.gz --- vmlinuz%(index)s root=%(rootlabel)s rootfstype=iso9660 %(liveargs)s %(extra)s --- initrd%(index)s.img
"""
return template % args
def __get_image_stanzas(self, isodir):
versions = []
kernels = self._get_kernel_versions()
for kernel in kernels:
for version in kernels[kernel]:
versions.append(version)
if not versions:
raise CreatorError("Unable to find valid kernels, "
"please check the repo")
kernel_options = self._get_kernel_options()
""" menu can be customized highly, the format is:
short_name1:long_name1:extra_opts1;short_name2:long_name2:extra_opts2
e.g.: autoinst:InstallationOnly:systemd.unit=installer-graphical.service
but in order to stay compatible with the old format, these are still OK:
liveinst autoinst
liveinst;autoinst
liveinst::;autoinst::
"""
oldmenus = {"basic": {
"short": "basic",
"long": "Installation Only (Text based)",
"extra": "basic nosplash 4"
},
"liveinst": {
"short": "liveinst",
"long": "Installation Only",
"extra": "liveinst nosplash 4"
},
"autoinst": {
"short": "autoinst",
"long": "Autoinstall (Deletes all existing content)",
"extra": "autoinst nosplash 4"
},
"netinst": {
"short": "netinst",
"long": "Network Installation",
"extra": "netinst 4"
},
"verify": {
"short": "check",
"long": "Verify and",
"extra": "check"
}
}
menu_options = self._get_menu_options()
menus = menu_options.split(";")
for i in range(len(menus)):
menus[i] = menus[i].split(":")
if len(menus) == 1 and len(menus[0]) == 1:
""" Keep compatible with the old usage way """
menus = menu_options.split()
for i in range(len(menus)):
menus[i] = [menus[i]]
cfg = ""
default_version = None
default_index = None
index = "0"
netinst = None
for version in versions:
(is_xen, isDracut) = self.__copy_kernel_and_initramfs(isodir, version, index)
if index == "0":
self._isDracut = isDracut
default = self.__is_default_kernel(kernel, kernels)
if default:
long = "Boot %s" % self.distro_name
elif kernel.startswith("kernel-"):
long = "Boot %s(%s)" % (self.name, kernel[7:])
else:
long = "Boot %s(%s)" % (self.name, kernel)
oldmenus["verify"]["long"] = "%s %s" % (oldmenus["verify"]["long"],
long)
# tell dracut not to ask for LUKS passwords or activate mdraid sets
if isDracut:
kern_opts = kernel_options + " rd.luks=0 rd.md=0 rd.dm=0"
else:
kern_opts = kernel_options
cfg += self.__get_image_stanza(is_xen, isDracut,
fslabel = self.fslabel,
liveargs = kern_opts,
long = long,
short = "linux" + index,
extra = "",
index = index)
if default:
cfg += "menu default\n"
default_version = version
default_index = index
for menu in menus:
if not menu[0]:
continue
short = menu[0] + index
extra = ""
if len(menu) >= 2:
long = menu[1]
else:
if menu[0] in oldmenus.keys():
if menu[0] == "verify" and not self._has_checkisomd5():
continue
if menu[0] == "netinst":
netinst = oldmenus[menu[0]]
continue
long = oldmenus[menu[0]]["long"]
extra = oldmenus[menu[0]]["extra"]
else:
long = short.upper() + " X" + index
extra = ""
if len(menu) >= 3:
extra = menu[2]
cfg += self.__get_image_stanza(is_xen, isDracut,
fslabel = self.fslabel,
liveargs = kernel_options,
long = long,
short = short,
extra = extra,
index = index)
index = str(int(index) + 1)
if not default_version:
default_version = versions[0]
if not default_index:
default_index = "0"
if netinst:
cfg += self.__get_image_stanza(is_xen, isDracut,
fslabel = self.fslabel,
liveargs = kernel_options,
long = netinst["long"],
short = netinst["short"],
extra = netinst["extra"],
index = default_index)
return cfg
def __get_memtest_stanza(self, isodir):
memtest = glob.glob(self._instroot + "/boot/memtest86*")
if not memtest:
return ""
shutil.copyfile(memtest[0], isodir + "/isolinux/memtest")
return """label memtest
menu label Memory Test
kernel memtest
"""
def __get_local_stanza(self, isodir):
return """label local
menu label Boot from local drive
localboot 0xffff
"""
def _configure_syslinux_bootloader(self, isodir):
"""configure the boot loader"""
fs_related.makedirs(isodir + "/isolinux")
menu = self.__find_syslinux_menu()
self.__copy_syslinux_files(isodir, menu,
self.__find_syslinux_mboot())
background = ""
if self.__copy_syslinux_background(isodir + "/isolinux/splash.jpg"):
background = "menu background splash.jpg"
cfg = self.__get_basic_syslinux_config(menu = menu,
background = background,
name = self.name,
timeout = self._timeout * 10,
distroname = self.distro_name)
cfg += self.__get_image_stanzas(isodir)
cfg += self.__get_memtest_stanza(isodir)
cfg += self.__get_local_stanza(isodir)
cfg += self._get_isolinux_stanzas(isodir)
cfgf = open(isodir + "/isolinux/isolinux.cfg", "w")
cfgf.write(cfg)
cfgf.close()
def __copy_efi_files(self, isodir):
if not os.path.exists(self._instroot + "/boot/efi/EFI/redhat/grub.efi"):
return False
shutil.copy(self._instroot + "/boot/efi/EFI/redhat/grub.efi",
isodir + "/EFI/boot/grub.efi")
shutil.copy(self._instroot + "/boot/grub/splash.xpm.gz",
isodir + "/EFI/boot/splash.xpm.gz")
return True
def __get_basic_efi_config(self, **args):
return """
default=0
splashimage=/EFI/boot/splash.xpm.gz
timeout %(timeout)d
hiddenmenu
""" %args
def __get_efi_image_stanza(self, **args):
return """title %(long)s
kernel /EFI/boot/vmlinuz%(index)s root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s
initrd /EFI/boot/initrd%(index)s.img
""" %args
def __get_efi_image_stanzas(self, isodir, name):
# FIXME: this only supports one kernel right now...
kernel_options = self._get_kernel_options()
checkisomd5 = self._has_checkisomd5()
cfg = ""
for index in range(0, 9):
# we don't support xen kernels
if os.path.exists("%s/EFI/boot/xen%d.gz" %(isodir, index)):
continue
cfg += self.__get_efi_image_stanza(fslabel = self.fslabel,
liveargs = kernel_options,
long = name,
extra = "", index = index)
if checkisomd5:
cfg += self.__get_efi_image_stanza(
fslabel = self.fslabel,
liveargs = kernel_options,
long = "Verify and Boot " + name,
extra = "check",
index = index)
break
return cfg
def _configure_efi_bootloader(self, isodir):
"""Set up the configuration for an EFI bootloader"""
fs_related.makedirs(isodir + "/EFI/boot")
if not self.__copy_efi_files(isodir):
shutil.rmtree(isodir + "/EFI")
return
for f in os.listdir(isodir + "/isolinux"):
os.link("%s/isolinux/%s" %(isodir, f),
"%s/EFI/boot/%s" %(isodir, f))
cfg = self.__get_basic_efi_config(name = self.name,
timeout = self._timeout)
cfg += self.__get_efi_image_stanzas(isodir, self.name)
cfgf = open(isodir + "/EFI/boot/grub.conf", "w")
cfgf.write(cfg)
cfgf.close()
# first gen mactel machines get the bootloader name wrong apparently
if rpmmisc.getBaseArch() == "i386":
os.link(isodir + "/EFI/boot/grub.efi",
isodir + "/EFI/boot/boot.efi")
os.link(isodir + "/EFI/boot/grub.conf",
isodir + "/EFI/boot/boot.conf")
# for most things, we want them named boot$efiarch
efiarch = {"i386": "ia32", "x86_64": "x64"}
efiname = efiarch[rpmmisc.getBaseArch()]
os.rename(isodir + "/EFI/boot/grub.efi",
isodir + "/EFI/boot/boot%s.efi" %(efiname,))
os.link(isodir + "/EFI/boot/grub.conf",
isodir + "/EFI/boot/boot%s.conf" %(efiname,))
def _configure_bootloader(self, isodir):
self._configure_syslinux_bootloader(isodir)
self._configure_efi_bootloader(isodir)
arch = "i386"
if arch in ("i386", "x86_64"):
LiveCDImageCreator = x86LiveImageCreator
elif arch.startswith("arm"):
LiveCDImageCreator = LiveImageCreatorBase
else:
raise CreatorError("Architecture not supported!")
########NEW FILE########
__FILENAME__ = liveusb
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import re
from mic import msger
from mic.utils import misc, fs_related, runner
from mic.utils.errors import CreatorError, MountError
from mic.utils.partitionedfs import PartitionedMount
from mic.imager.livecd import LiveCDImageCreator
class LiveUSBImageCreator(LiveCDImageCreator):
def __init__(self, *args):
LiveCDImageCreator.__init__(self, *args)
self._dep_checks.extend(["kpartx", "parted"])
# remove dependency of genisoimage in parent class
if "genisoimage" in self._dep_checks:
self._dep_checks.remove("genisoimage")
def _create_usbimg(self, isodir):
overlaysizemb = 64 #default
#skipcompress = self.skip_compression?
fstype = "vfat"
homesizemb=0
swapsizemb=0
homefile="home.img"
plussize=128
kernelargs=None
if fstype == 'vfat':
if overlaysizemb > 2047:
raise CreatorError("Can't have an overlay of 2048MB or "
"greater on VFAT")
if homesizemb > 2047:
raise CreatorError("Can't have an home overlay of 2048MB or "
"greater on VFAT")
if swapsizemb > 2047:
raise CreatorError("Can't have an swap overlay of 2048MB or "
"greater on VFAT")
livesize = misc.get_file_size(isodir + "/LiveOS")
usbimgsize = (overlaysizemb + \
homesizemb + \
swapsizemb + \
livesize + \
plussize) * 1024L * 1024L
disk = fs_related.SparseLoopbackDisk("%s/%s.usbimg" \
% (self._outdir, self.name),
usbimgsize)
usbmnt = self._mkdtemp("usb-mnt")
usbloop = PartitionedMount(usbmnt)
usbloop.add_disk('/dev/sdb', disk)
usbloop.add_partition(usbimgsize/1024/1024,
"/dev/sdb",
"/",
fstype,
boot=True)
usbloop.mount()
try:
fs_related.makedirs(usbmnt + "/LiveOS")
if os.path.exists(isodir + "/LiveOS/squashfs.img"):
shutil.copyfile(isodir + "/LiveOS/squashfs.img",
usbmnt + "/LiveOS/squashfs.img")
else:
fs_related.mksquashfs(os.path.dirname(self._image),
usbmnt + "/LiveOS/squashfs.img")
if os.path.exists(isodir + "/LiveOS/osmin.img"):
shutil.copyfile(isodir + "/LiveOS/osmin.img",
usbmnt + "/LiveOS/osmin.img")
if fstype == "vfat" or fstype == "msdos":
uuid = usbloop.partitions[0]['mount'].uuid
label = usbloop.partitions[0]['mount'].fslabel
usblabel = "UUID=%s-%s" % (uuid[0:4], uuid[4:8])
overlaysuffix = "-%s-%s-%s" % (label, uuid[0:4], uuid[4:8])
else:
diskmount = usbloop.partitions[0]['mount']
usblabel = "UUID=%s" % diskmount.uuid
overlaysuffix = "-%s-%s" % (diskmount.fslabel, diskmount.uuid)
args = ['cp', "-Rf", isodir + "/isolinux", usbmnt + "/syslinux"]
rc = runner.show(args)
if rc:
raise CreatorError("Can't copy isolinux directory %s" \
% (isodir + "/isolinux/*"))
if os.path.isfile("/usr/share/syslinux/isolinux.bin"):
syslinux_path = "/usr/share/syslinux"
elif os.path.isfile("/usr/lib/syslinux/isolinux.bin"):
syslinux_path = "/usr/lib/syslinux"
else:
raise CreatorError("syslinux not installed : "
"cannot find syslinux installation path")
for f in ("isolinux.bin", "vesamenu.c32"):
path = os.path.join(syslinux_path, f)
if os.path.isfile(path):
args = ['cp', path, usbmnt + "/syslinux/"]
rc = runner.show(args)
if rc:
raise CreatorError("Can't copy syslinux file " + path)
else:
raise CreatorError("syslinux not installed: "
"syslinux file %s not found" % path)
fd = open(isodir + "/isolinux/isolinux.cfg", "r")
text = fd.read()
fd.close()
pattern = re.compile('CDLABEL=[^ ]*')
text = pattern.sub(usblabel, text)
pattern = re.compile('rootfstype=[^ ]*')
text = pattern.sub("rootfstype=" + fstype, text)
if kernelargs:
text = text.replace("rd.live.image", "rd.live.image " + kernelargs)
if overlaysizemb > 0:
msger.info("Initializing persistent overlay file")
overfile = "overlay" + overlaysuffix
if fstype == "vfat":
args = ['dd',
"if=/dev/zero",
"of=" + usbmnt + "/LiveOS/" + overfile,
"count=%d" % overlaysizemb,
"bs=1M"]
else:
args = ['dd',
"if=/dev/null",
"of=" + usbmnt + "/LiveOS/" + overfile,
"count=1",
"bs=1M",
"seek=%d" % overlaysizemb]
rc = runner.show(args)
if rc:
raise CreatorError("Can't create overlay file")
text = text.replace("rd.live.image", "rd.live.image rd.live.overlay=" + usblabel)
text = text.replace(" ro ", " rw ")
if swapsizemb > 0:
msger.info("Initializing swap file")
swapfile = usbmnt + "/LiveOS/" + "swap.img"
args = ['dd',
"if=/dev/zero",
"of=" + swapfile,
"count=%d" % swapsizemb,
"bs=1M"]
rc = runner.show(args)
if rc:
raise CreatorError("Can't create swap file")
args = ["mkswap", "-f", swapfile]
rc = runner.show(args)
if rc:
raise CreatorError("Can't mkswap on swap file")
if homesizemb > 0:
msger.info("Initializing persistent /home")
homefile = usbmnt + "/LiveOS/" + homefile
if fstype == "vfat":
args = ['dd',
"if=/dev/zero",
"of=" + homefile,
"count=%d" % homesizemb,
"bs=1M"]
else:
args = ['dd',
"if=/dev/null",
"of=" + homefile,
"count=1",
"bs=1M",
"seek=%d" % homesizemb]
rc = runner.show(args)
if rc:
raise CreatorError("Can't create home file")
mkfscmd = fs_related.find_binary_path("/sbin/mkfs." + fstype)
if fstype == "ext2" or fstype == "ext3":
args = [mkfscmd, "-F", "-j", homefile]
else:
args = [mkfscmd, homefile]
rc = runner.show(args)
if rc:
raise CreatorError("Can't mke2fs home file")
if fstype == "ext2" or fstype == "ext3":
tune2fs = fs_related.find_binary_path("tune2fs")
args = [tune2fs,
"-c0",
"-i0",
"-ouser_xattr,acl",
homefile]
rc = runner.show(args)
if rc:
raise CreatorError("Can't tune2fs home file")
if fstype == "vfat" or fstype == "msdos":
syslinuxcmd = fs_related.find_binary_path("syslinux")
syslinuxcfg = usbmnt + "/syslinux/syslinux.cfg"
args = [syslinuxcmd,
"-d",
"syslinux",
usbloop.partitions[0]["device"]]
elif fstype == "ext2" or fstype == "ext3":
extlinuxcmd = fs_related.find_binary_path("extlinux")
syslinuxcfg = usbmnt + "/syslinux/extlinux.conf"
args = [extlinuxcmd,
"-i",
usbmnt + "/syslinux"]
else:
raise CreatorError("Invalid file system type: %s" % (fstype))
os.unlink(usbmnt + "/syslinux/isolinux.cfg")
fd = open(syslinuxcfg, "w")
fd.write(text)
fd.close()
rc = runner.show(args)
if rc:
raise CreatorError("Can't install boot loader.")
finally:
usbloop.unmount()
usbloop.cleanup()
# Need to do this after image is unmounted and device mapper is closed
msger.info("set MBR")
mbrfile = "/usr/lib/syslinux/mbr.bin"
if not os.path.exists(mbrfile):
mbrfile = "/usr/share/syslinux/mbr.bin"
if not os.path.exists(mbrfile):
raise CreatorError("mbr.bin file didn't exist.")
mbrsize = os.path.getsize(mbrfile)
outimg = "%s/%s.usbimg" % (self._outdir, self.name)
args = ['dd',
"if=" + mbrfile,
"of=" + outimg,
"seek=0",
"conv=notrunc",
"bs=1",
"count=%d" % (mbrsize)]
rc = runner.show(args)
if rc:
raise CreatorError("Can't set MBR.")
def _stage_final_image(self):
try:
isodir = self._get_isodir()
fs_related.makedirs(isodir + "/LiveOS")
minimal_size = self._resparse()
if not self.skip_minimize:
fs_related.create_image_minimizer(isodir + "/LiveOS/osmin.img",
self._image,
minimal_size)
if self.skip_compression:
shutil.move(self._image,
isodir + "/LiveOS/ext3fs.img")
else:
fs_related.makedirs(os.path.join(
os.path.dirname(self._image),
"LiveOS"))
shutil.move(self._image,
os.path.join(os.path.dirname(self._image),
"LiveOS", "ext3fs.img"))
fs_related.mksquashfs(os.path.dirname(self._image),
isodir + "/LiveOS/squashfs.img")
self._create_usbimg(isodir)
if self.pack_to:
usbimg = os.path.join(self._outdir, self.name + ".usbimg")
packimg = os.path.join(self._outdir, self.pack_to)
misc.packing(packimg, usbimg)
os.unlink(usbimg)
finally:
shutil.rmtree(isodir, ignore_errors = True)
self._set_isodir(None)
########NEW FILE########
__FILENAME__ = loop
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import glob
import shutil
from mic import kickstart, msger
from mic.utils.errors import CreatorError, MountError
from mic.utils import misc, runner, fs_related as fs
from mic.imager.baseimager import BaseImageCreator
# The maximum string length supported for LoopImageCreator.fslabel
FSLABEL_MAXLEN = 32
def save_mountpoints(fpath, loops, arch = None):
"""Save mount points mapping to file
:fpath, the xml file to store partition info
:loops, dict of partition info
:arch, image arch
"""
if not fpath or not loops:
return
from xml.dom import minidom
doc = minidom.Document()
imgroot = doc.createElement("image")
doc.appendChild(imgroot)
if arch:
imgroot.setAttribute('arch', arch)
for loop in loops:
part = doc.createElement("partition")
imgroot.appendChild(part)
for (key, val) in loop.items():
if isinstance(val, fs.Mount):
continue
part.setAttribute(key, str(val))
with open(fpath, 'w') as wf:
wf.write(doc.toprettyxml(indent=' '))
return
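# A rough sketch of the XML this produces (values are illustrative):
#   <image arch="i686">
#     <partition mountpoint="/" label="meego" name="meego.img"
#                size="4096" fstype="ext3" .../>
#   </image>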
def load_mountpoints(fpath):
"""Load mount points mapping from file
:fpath, file path to load
"""
if not fpath:
return
from xml.dom import minidom
mount_maps = []
with open(fpath, 'r') as rf:
dom = minidom.parse(rf)
imgroot = dom.documentElement
for part in imgroot.getElementsByTagName("partition"):
p = dict(part.attributes.items())
try:
mp = (p['mountpoint'], p['label'], p['name'],
int(p['size']), p['fstype'])
except KeyError:
msger.warning("Wrong format line in file: %s" % fpath)
except ValueError:
msger.warning("Invalid size '%s' in file: %s" % (p['size'], fpath))
else:
mount_maps.append(mp)
return mount_maps
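# For the sketch above, load_mountpoints() would return tuples like
# (illustrative): [('/', 'meego', 'meego.img', 4096, 'ext3')]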
class LoopImageCreator(BaseImageCreator):
"""Installs a system into a loopback-mountable filesystem image.
LoopImageCreator is a straightforward ImageCreator subclass; the system
is installed into an ext3 filesystem on a sparse file which can be
subsequently loopback-mounted.
When specifying multiple partitions in kickstart file, each partition
will be created as a separated loop image.
"""
def __init__(self, creatoropts=None, pkgmgr=None,
compress_image=None,
shrink_image=False):
"""Initialize a LoopImageCreator instance.
This method takes the same arguments as ImageCreator.__init__()
with the addition of:
fslabel -- A string used as a label for any filesystems created.
"""
BaseImageCreator.__init__(self, creatoropts, pkgmgr)
self.compress_image = compress_image
self.shrink_image = shrink_image
self.__fslabel = None
self.fslabel = self.name
self.__blocksize = 4096
if self.ks:
self.__fstype = kickstart.get_image_fstype(self.ks,
"ext3")
self.__fsopts = kickstart.get_image_fsopts(self.ks,
"defaults,noatime")
allloops = []
for part in sorted(kickstart.get_partitions(self.ks),
key=lambda p: p.mountpoint):
if part.fstype == "swap":
continue
label = part.label
mp = part.mountpoint
if mp == '/':
# the base image
if not label:
label = self.name
else:
mp = mp.rstrip('/')
if not label:
msger.warning('no "label" specified for loop img at %s'
', using the mountpoint as the name' % mp)
label = mp.split('/')[-1]
imgname = misc.strip_end(label, '.img') + '.img'
allloops.append({
'mountpoint': mp,
'label': label,
'name': imgname,
'size': part.size or 4096L * 1024 * 1024,
'fstype': part.fstype or 'ext3',
'extopts': part.extopts or None,
'loop': None, # to be created in _mount_instroot
})
self._instloops = allloops
else:
self.__fstype = None
self.__fsopts = None
self._instloops = []
self.__imgdir = None
if self.ks:
self.__image_size = kickstart.get_image_size(self.ks,
4096L * 1024 * 1024)
else:
self.__image_size = 0
self._img_name = self.name + ".img"
def get_image_names(self):
if not self._instloops:
return None
return [lo['name'] for lo in self._instloops]
def _set_fstype(self, fstype):
self.__fstype = fstype
def _set_image_size(self, imgsize):
self.__image_size = imgsize
#
# Properties
#
def __get_fslabel(self):
if self.__fslabel is None:
return self.name
else:
return self.__fslabel
def __set_fslabel(self, val):
if val is None:
self.__fslabel = None
else:
self.__fslabel = val[:FSLABEL_MAXLEN]
#A string used to label any filesystems created.
#
#Some filesystems impose a constraint on the maximum allowed size of the
#filesystem label. In the case of ext3 it's 16 characters, but in the case
#of ISO9660 it's 32 characters.
#
#mke2fs silently truncates the label, but mkisofs aborts if the label is
#too long. So, for convenience's sake, any string assigned to this attribute
#is silently truncated to FSLABEL_MAXLEN (32) characters.
fslabel = property(__get_fslabel, __set_fslabel)
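# For example (illustrative): assigning a 40-character string,
#   creator.fslabel = "a" * 40
# silently stores only the first FSLABEL_MAXLEN (32) characters.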
def __get_image(self):
if self.__imgdir is None:
raise CreatorError("_image is not valid before calling mount()")
return os.path.join(self.__imgdir, self._img_name)
#The location of the image file.
#
#This is the path to the filesystem image. Subclasses may use this path
#in order to package the image in _stage_final_image().
#
#Note, this directory does not exist before ImageCreator.mount() is called.
#
#Note also, this is a read-only attribute.
_image = property(__get_image)
def __get_blocksize(self):
return self.__blocksize
def __set_blocksize(self, val):
if self._instloops:
raise CreatorError("_blocksize must be set before calling mount()")
try:
self.__blocksize = int(val)
except ValueError:
raise CreatorError("'%s' is not a valid integer value "
"for _blocksize" % val)
#The block size used by the image's filesystem.
#
#This is the block size used when creating the filesystem image. Subclasses
#may change this if they wish to use something other than a 4k block size.
#
#Note, this attribute may only be set before calling mount().
_blocksize = property(__get_blocksize, __set_blocksize)
def __get_fstype(self):
return self.__fstype
def __set_fstype(self, val):
if val != "ext2" and val != "ext3":
raise CreatorError("Unknown _fstype '%s' supplied" % val)
self.__fstype = val
#The type of filesystem used for the image.
#
#This is the filesystem type used when creating the filesystem image.
#Subclasses may change this if they wish to use something other than ext3.
#
#Note, only ext2 and ext3 are currently supported.
#
#Note also, this attribute may only be set before calling mount().
_fstype = property(__get_fstype, __set_fstype)
def __get_fsopts(self):
return self.__fsopts
def __set_fsopts(self, val):
self.__fsopts = val
#Mount options of filesystem used for the image.
#
#This can be specified by --fsoptions="xxx,yyy" in part command in
#kickstart file.
_fsopts = property(__get_fsopts, __set_fsopts)
#
# Helpers for subclasses
#
def _resparse(self, size=None):
"""Rebuild the filesystem image to be as sparse as possible.
This method should be used by subclasses when staging the final image
in order to reduce the actual space taken up by the sparse image file
to be as little as possible.
This is done by resizing the filesystem to the minimal size (thereby
eliminating any space taken up by deleted files) and then resizing it
back to the supplied size.
size -- the size, in bytes, to which the filesystem image should be
resized after it has been minimized; this defaults to None,
causing the original size specified by the kickstart file to
be used (or 4GiB if not specified in the kickstart).
"""
minsize = 0
for item in self._instloops:
if item['name'] == self._img_name:
minsize = item['loop'].resparse(size)
else:
item['loop'].resparse(size)
return minsize
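# Hedged usage note: subclasses typically call self._resparse() (or
# self._resparse(0) to shrink the image fully) from _stage_final_image(),
# as the loop and livecd creators in this bundle do.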
def _base_on(self, base_on=None):
if base_on and self._image != base_on:
shutil.copyfile(base_on, self._image)
def _check_imgdir(self):
if self.__imgdir is None:
self.__imgdir = self._mkdtemp()
#
# Actual implementation
#
def _mount_instroot(self, base_on=None):
if base_on and os.path.isfile(base_on):
self.__imgdir = os.path.dirname(base_on)
imgname = os.path.basename(base_on)
self._base_on(base_on)
self._set_image_size(misc.get_file_size(self._image))
# here, self._instloops must be []
self._instloops.append({
"mountpoint": "/",
"label": self.name,
"name": imgname,
"size": self.__image_size or 4096L,
"fstype": self.__fstype or "ext3",
"extopts": None,
"loop": None
})
self._check_imgdir()
for loop in self._instloops:
fstype = loop['fstype']
mp = os.path.join(self._instroot, loop['mountpoint'].lstrip('/'))
size = loop['size'] * 1024L * 1024L
imgname = loop['name']
if fstype in ("ext2", "ext3", "ext4"):
MyDiskMount = fs.ExtDiskMount
elif fstype == "btrfs":
MyDiskMount = fs.BtrfsDiskMount
elif fstype in ("vfat", "msdos"):
MyDiskMount = fs.VfatDiskMount
else:
msger.error('Cannot support fstype: %s' % fstype)
loop['loop'] = MyDiskMount(fs.SparseLoopbackDisk(
os.path.join(self.__imgdir, imgname),
size),
mp,
fstype,
self._blocksize,
loop['label'])
if fstype in ("ext2", "ext3", "ext4"):
loop['loop'].extopts = loop['extopts']
            msger.verbose('Mounting image "%s" on "%s"' % (imgname, mp))
            fs.makedirs(mp)
            loop['loop'].mount()
def _unmount_instroot(self):
for item in reversed(self._instloops):
try:
item['loop'].cleanup()
            except:
                # best-effort cleanup; ignore unmount/teardown errors
                pass
def _stage_final_image(self):
if self.pack_to or self.shrink_image:
self._resparse(0)
else:
self._resparse()
for item in self._instloops:
imgfile = os.path.join(self.__imgdir, item['name'])
if item['fstype'] == "ext4":
runner.show('/sbin/tune2fs -O ^huge_file,extents,uninit_bg %s '
% imgfile)
if self.compress_image:
misc.compressing(imgfile, self.compress_image)
if not self.pack_to:
for item in os.listdir(self.__imgdir):
shutil.move(os.path.join(self.__imgdir, item),
os.path.join(self._outdir, item))
else:
msger.info("Pack all loop images together to %s" % self.pack_to)
dstfile = os.path.join(self._outdir, self.pack_to)
misc.packing(dstfile, self.__imgdir)
if self.pack_to:
mountfp_xml = os.path.splitext(self.pack_to)[0]
mountfp_xml = misc.strip_end(mountfp_xml, '.tar') + ".xml"
else:
mountfp_xml = self.name + ".xml"
# save mount points mapping file to xml
save_mountpoints(os.path.join(self._outdir, mountfp_xml),
self._instloops,
self.target_arch)
def copy_attachment(self):
if not hasattr(self, '_attachment') or not self._attachment:
return
self._check_imgdir()
msger.info("Copying attachment files...")
for item in self._attachment:
if not os.path.exists(item):
continue
dpath = os.path.join(self.__imgdir, os.path.basename(item))
msger.verbose("Copy attachment %s to %s" % (item, dpath))
shutil.copy(item, dpath)
########NEW FILE########
__FILENAME__ = raw
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import stat
import shutil
from mic import kickstart, msger
from mic.utils import fs_related, runner, misc
from mic.utils.partitionedfs import PartitionedMount
from mic.utils.errors import CreatorError, MountError
from mic.imager.baseimager import BaseImageCreator
class RawImageCreator(BaseImageCreator):
"""Installs a system into a file containing a partitioned disk image.
ApplianceImageCreator is an advanced ImageCreator subclass; a sparse file
    is formatted with a partition table, each partition is loopback mounted,
    and the system is installed into a virtual disk. The disk image can
    subsequently be booted in a virtual machine or accessed with kpartx.
"""
def __init__(self, creatoropts=None, pkgmgr=None, compress_image=None, generate_bmap=None, fstab_entry="uuid"):
"""Initialize a ApplianceImageCreator instance.
This method takes the same arguments as ImageCreator.__init__()
"""
BaseImageCreator.__init__(self, creatoropts, pkgmgr)
self.__instloop = None
self.__imgdir = None
self.__disks = {}
self.__disk_format = "raw"
self._disk_names = []
self._ptable_format = self.ks.handler.bootloader.ptable
self.vmem = 512
self.vcpu = 1
self.checksum = False
self.use_uuid = fstab_entry == "uuid"
self.appliance_version = None
self.appliance_release = None
self.compress_image = compress_image
self.bmap_needed = generate_bmap
self._need_extlinux = not kickstart.use_installerfw(self.ks, "extlinux")
#self.getsource = False
#self.listpkg = False
self._dep_checks.extend(["sync", "kpartx", "parted"])
if self._need_extlinux:
self._dep_checks.extend(["extlinux"])
def configure(self, repodata = None):
import subprocess
def chroot():
os.chroot(self._instroot)
os.chdir("/")
if os.path.exists(self._instroot + "/usr/bin/Xorg"):
subprocess.call(["/bin/chmod", "u+s", "/usr/bin/Xorg"],
preexec_fn = chroot)
BaseImageCreator.configure(self, repodata)
def _get_fstab(self):
if kickstart.use_installerfw(self.ks, "fstab"):
# The fstab file will be generated by installer framework scripts
# instead.
return None
s = ""
for mp in self.__instloop.mountOrder:
p = None
for p1 in self.__instloop.partitions:
if p1['mountpoint'] == mp:
p = p1
break
if self.use_uuid and p['uuid']:
device = "UUID=%s" % p['uuid']
else:
device = "/dev/%s%-d" % (p['disk_name'], p['num'])
s += "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0\n" % {
'device': device,
'mountpoint': p['mountpoint'],
'fstype': p['fstype'],
'fsopts': "defaults,noatime" if not p['fsopts'] else p['fsopts']}
if p['mountpoint'] == "/":
for subvol in self.__instloop.subvolumes:
if subvol['mountpoint'] == "/":
continue
s += "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0\n" % {
'device': "/dev/%s%-d" % (p['disk_name'], p['num']),
'mountpoint': subvol['mountpoint'],
'fstype': p['fstype'],
'fsopts': "defaults,noatime" if not subvol['fsopts'] else subvol['fsopts']}
s += "devpts /dev/pts devpts gid=5,mode=620 0 0\n"
s += "tmpfs /dev/shm tmpfs defaults 0 0\n"
s += "proc /proc proc defaults 0 0\n"
s += "sysfs /sys sysfs defaults 0 0\n"
return s
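    # An illustrative fstab produced above for a single root partition
    # (the UUID value is a placeholder):
    #   UUID=0b4ec00b-... /        ext3   defaults,noatime 0 0
    #   devpts /dev/pts devpts gid=5,mode=620 0 0
    #   tmpfs  /dev/shm tmpfs  defaults 0 0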
def _create_mkinitrd_config(self):
"""write to tell which modules to be included in initrd"""
mkinitrd = ""
mkinitrd += "PROBE=\"no\"\n"
mkinitrd += "MODULES+=\"ext3 ata_piix sd_mod libata scsi_mod\"\n"
mkinitrd += "rootfs=\"ext3\"\n"
mkinitrd += "rootopts=\"defaults\"\n"
msger.debug("Writing mkinitrd config %s/etc/sysconfig/mkinitrd" \
% self._instroot)
        os.makedirs(self._instroot + "/etc/sysconfig/", mode=0755)
cfg = open(self._instroot + "/etc/sysconfig/mkinitrd", "w")
cfg.write(mkinitrd)
cfg.close()
def _get_parts(self):
if not self.ks:
raise CreatorError("Failed to get partition info, "
"please check your kickstart setting.")
# Set a default partition if no partition is given out
if not self.ks.handler.partition.partitions:
partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
args = partstr.split()
pd = self.ks.handler.partition.parse(args[1:])
if pd not in self.ks.handler.partition.partitions:
self.ks.handler.partition.partitions.append(pd)
# partitions list from kickstart file
return kickstart.get_partitions(self.ks)
def get_disk_names(self):
""" Returns a list of physical target disk names (e.g., 'sdb') which
will be created. """
if self._disk_names:
return self._disk_names
#get partition info from ks handler
parts = self._get_parts()
for i in range(len(parts)):
if parts[i].disk:
disk_name = parts[i].disk
else:
raise CreatorError("Failed to create disks, no --ondisk "
"specified in partition line of ks file")
if parts[i].mountpoint and not parts[i].fstype:
raise CreatorError("Failed to create disks, no --fstype "
"specified for partition with mountpoint "
"'%s' in the ks file")
self._disk_names.append(disk_name)
return self._disk_names
    def _full_name(self, name, extension):
        """ Construct full file name for a file we generate. """
        return "%s-%s.%s" % (self.name, name, extension)
    def _full_path(self, path, name, extension):
        """ Construct full file path to a file we generate. """
        return os.path.join(path, self._full_name(name, extension))
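    # For example (illustrative), with self.name == "myimage":
    #   self._full_name("sda", "raw")           -> "myimage-sda.raw"
    #   self._full_path("/tmp", "sda", "bmap")  -> "/tmp/myimage-sda.bmap"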
#
    # Actual implementation
#
def _mount_instroot(self, base_on = None):
parts = self._get_parts()
self.__instloop = PartitionedMount(self._instroot)
for p in parts:
self.__instloop.add_partition(int(p.size),
p.disk,
p.mountpoint,
p.fstype,
p.label,
fsopts = p.fsopts,
boot = p.active,
align = p.align,
part_type = p.part_type)
self.__instloop.layout_partitions(self._ptable_format)
# Create the disks
self.__imgdir = self._mkdtemp()
for disk_name, disk in self.__instloop.disks.items():
full_path = self._full_path(self.__imgdir, disk_name, "raw")
msger.debug("Adding disk %s as %s with size %s bytes" \
% (disk_name, full_path, disk['min_size']))
disk_obj = fs_related.SparseLoopbackDisk(full_path,
disk['min_size'])
self.__disks[disk_name] = disk_obj
self.__instloop.add_disk(disk_name, disk_obj)
self.__instloop.mount()
self._create_mkinitrd_config()
def _get_required_packages(self):
required_packages = BaseImageCreator._get_required_packages(self)
if self._need_extlinux:
if not self.target_arch or not self.target_arch.startswith("arm"):
required_packages += ["syslinux", "syslinux-extlinux"]
return required_packages
def _get_excluded_packages(self):
return BaseImageCreator._get_excluded_packages(self)
def _get_syslinux_boot_config(self):
rootdev = None
root_part_uuid = None
for p in self.__instloop.partitions:
if p['mountpoint'] == "/":
rootdev = "/dev/%s%-d" % (p['disk_name'], p['num'])
root_part_uuid = p['partuuid']
return (rootdev, root_part_uuid)
def _create_syslinux_config(self):
splash = os.path.join(self._instroot, "boot/extlinux")
if os.path.exists(splash):
splashline = "menu background splash.jpg"
else:
splashline = ""
(rootdev, root_part_uuid) = self._get_syslinux_boot_config()
options = self.ks.handler.bootloader.appendLine
#XXX don't hardcode default kernel - see livecd code
syslinux_conf = ""
syslinux_conf += "prompt 0\n"
syslinux_conf += "timeout 1\n"
syslinux_conf += "\n"
syslinux_conf += "default vesamenu.c32\n"
syslinux_conf += "menu autoboot Starting %s...\n" % self.distro_name
syslinux_conf += "menu hidden\n"
syslinux_conf += "\n"
syslinux_conf += "%s\n" % splashline
syslinux_conf += "menu title Welcome to %s!\n" % self.distro_name
syslinux_conf += "menu color border 0 #ffffffff #00000000\n"
syslinux_conf += "menu color sel 7 #ffffffff #ff000000\n"
syslinux_conf += "menu color title 0 #ffffffff #00000000\n"
syslinux_conf += "menu color tabmsg 0 #ffffffff #00000000\n"
syslinux_conf += "menu color unsel 0 #ffffffff #00000000\n"
syslinux_conf += "menu color hotsel 0 #ff000000 #ffffffff\n"
syslinux_conf += "menu color hotkey 7 #ffffffff #ff000000\n"
syslinux_conf += "menu color timeout_msg 0 #ffffffff #00000000\n"
syslinux_conf += "menu color timeout 0 #ffffffff #00000000\n"
syslinux_conf += "menu color cmdline 0 #ffffffff #00000000\n"
versions = []
kernels = self._get_kernel_versions()
symkern = "%s/boot/vmlinuz" % self._instroot
if os.path.lexists(symkern):
v = os.path.realpath(symkern).replace('%s-' % symkern, "")
syslinux_conf += "label %s\n" % self.distro_name.lower()
syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
syslinux_conf += "\tlinux ../vmlinuz\n"
if self._ptable_format == 'msdos':
rootstr = rootdev
else:
if not root_part_uuid:
raise MountError("Cannot find the root GPT partition UUID")
rootstr = "PARTUUID=%s" % root_part_uuid
syslinux_conf += "\tappend ro root=%s %s\n" % (rootstr, options)
syslinux_conf += "\tmenu default\n"
else:
for kernel in kernels:
for version in kernels[kernel]:
versions.append(version)
footlabel = 0
for v in versions:
syslinux_conf += "label %s%d\n" \
% (self.distro_name.lower(), footlabel)
syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
syslinux_conf += "\tlinux ../vmlinuz-%s\n" % v
syslinux_conf += "\tappend ro root=%s %s\n" \
% (rootdev, options)
if footlabel == 0:
syslinux_conf += "\tmenu default\n"
            footlabel += 1
msger.debug("Writing syslinux config %s/boot/extlinux/extlinux.conf" \
% self._instroot)
cfg = open(self._instroot + "/boot/extlinux/extlinux.conf", "w")
cfg.write(syslinux_conf)
cfg.close()
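    # A generated extlinux.conf stanza looks roughly like this (illustrative
    # values):
    #   label myos
    #       menu label MyOS (3.0.0)
    #       linux ../vmlinuz
    #       append ro root=/dev/sda1 quiet
    #       menu default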
def _install_syslinux(self):
for name in self.__disks.keys():
loopdev = self.__disks[name].device
# Set MBR
mbrfile = "%s/usr/share/syslinux/" % self._instroot
if self._ptable_format == 'gpt':
mbrfile += "gptmbr.bin"
else:
mbrfile += "mbr.bin"
msger.debug("Installing syslinux bootloader '%s' to %s" % \
(mbrfile, loopdev))
mbrsize = os.stat(mbrfile)[stat.ST_SIZE]
rc = runner.show(['dd', 'if=%s' % mbrfile, 'of=' + loopdev])
if rc != 0:
raise MountError("Unable to set MBR to %s" % loopdev)
# Ensure all data is flushed to disk before doing syslinux install
runner.quiet('sync')
fullpathsyslinux = fs_related.find_binary_path("extlinux")
rc = runner.show([fullpathsyslinux,
"-i",
"%s/boot/extlinux" % self._instroot])
if rc != 0:
raise MountError("Unable to install syslinux bootloader to %s" \
% loopdev)
def _create_bootconfig(self):
#If syslinux is available do the required configurations.
if self._need_extlinux \
and os.path.exists("%s/usr/share/syslinux/" % (self._instroot)) \
and os.path.exists("%s/boot/extlinux/" % (self._instroot)):
self._create_syslinux_config()
self._install_syslinux()
def _unmount_instroot(self):
        if self.__instloop is not None:
try:
self.__instloop.cleanup()
except MountError, err:
msger.warning("%s" % err)
def _resparse(self, size = None):
return self.__instloop.resparse(size)
def _get_post_scripts_env(self, in_chroot):
env = BaseImageCreator._get_post_scripts_env(self, in_chroot)
# Export the file-system UUIDs and partition UUIDs (AKA PARTUUIDs)
for p in self.__instloop.partitions:
env.update(self._set_part_env(p['ks_pnum'], "UUID", p['uuid']))
env.update(self._set_part_env(p['ks_pnum'], "PARTUUID", p['partuuid']))
return env
def _stage_final_image(self):
"""Stage the final system image in _outdir.
write meta data
"""
self._resparse()
if self.compress_image:
for imgfile in os.listdir(self.__imgdir):
if imgfile.endswith('.raw') or imgfile.endswith('bin'):
imgpath = os.path.join(self.__imgdir, imgfile)
misc.compressing(imgpath, self.compress_image)
if self.pack_to:
dst = os.path.join(self._outdir, self.pack_to)
msger.info("Pack all raw images to %s" % dst)
misc.packing(dst, self.__imgdir)
else:
msger.debug("moving disks to stage location")
for imgfile in os.listdir(self.__imgdir):
src = os.path.join(self.__imgdir, imgfile)
dst = os.path.join(self._outdir, imgfile)
msger.debug("moving %s to %s" % (src,dst))
shutil.move(src,dst)
self._write_image_xml()
def _write_image_xml(self):
imgarch = "i686"
if self.target_arch and self.target_arch.startswith("arm"):
imgarch = "arm"
xml = "<image>\n"
name_attributes = ""
if self.appliance_version:
name_attributes += " version='%s'" % self.appliance_version
if self.appliance_release:
name_attributes += " release='%s'" % self.appliance_release
xml += " <name%s>%s</name>\n" % (name_attributes, self.name)
xml += " <domain>\n"
# XXX don't hardcode - determine based on the kernel we installed for
# grub baremetal vs xen
xml += " <boot type='hvm'>\n"
xml += " <guest>\n"
xml += " <arch>%s</arch>\n" % imgarch
xml += " </guest>\n"
xml += " <os>\n"
xml += " <loader dev='hd'/>\n"
xml += " </os>\n"
i = 0
for name in self.__disks.keys():
full_name = self._full_name(name, self.__disk_format)
xml += " <drive disk='%s' target='hd%s'/>\n" \
% (full_name, chr(ord('a') + i))
i = i + 1
xml += " </boot>\n"
xml += " <devices>\n"
xml += " <vcpu>%s</vcpu>\n" % self.vcpu
xml += " <memory>%d</memory>\n" %(self.vmem * 1024)
for network in self.ks.handler.network.network:
xml += " <interface/>\n"
xml += " <graphics/>\n"
xml += " </devices>\n"
xml += " </domain>\n"
xml += " <storage>\n"
if self.checksum is True:
for name in self.__disks.keys():
diskpath = self._full_path(self._outdir, name, \
self.__disk_format)
full_name = self._full_name(name, self.__disk_format)
msger.debug("Generating disk signature for %s" % full_name)
xml += " <disk file='%s' use='system' format='%s'>\n" \
% (full_name, self.__disk_format)
hashes = misc.calc_hashes(diskpath, ('sha1', 'sha256'))
xml += " <checksum type='sha1'>%s</checksum>\n" \
% hashes[0]
xml += " <checksum type='sha256'>%s</checksum>\n" \
% hashes[1]
xml += " </disk>\n"
else:
for name in self.__disks.keys():
full_name = self._full_name(name, self.__disk_format)
xml += " <disk file='%s' use='system' format='%s'/>\n" \
% (full_name, self.__disk_format)
xml += " </storage>\n"
xml += "</image>\n"
msger.debug("writing image XML to %s/%s.xml" %(self._outdir, self.name))
cfg = open("%s/%s.xml" % (self._outdir, self.name), "w")
cfg.write(xml)
cfg.close()
def generate_bmap(self):
""" Generate block map file for the image. The idea is that while disk
images we generate may be large (e.g., 4GiB), they may actually contain
        only a little real data, e.g., 512MiB. This data consists of files,
        directories, file-system meta-data, the partition table, etc. In other words, when
flashing the image to the target device, you do not have to copy all the
4GiB of data, you can copy only 512MiB of it, which is 4 times faster.
This function generates the block map file for an arbitrary image that
mic has generated. The block map file is basically an XML file which
contains a list of blocks which have to be copied to the target device.
The other blocks are not used and there is no need to copy them. """
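        # Illustrative consumption of the generated file (assuming the
        # bmap-tools package is available on the host):
        #   bmaptool copy --bmap myimage-sda.bmap myimage-sda.raw /dev/sdX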
if self.bmap_needed is None:
return
from mic.utils import BmapCreate
msger.info("Generating the map file(s)")
for name in self.__disks.keys():
image = self._full_path(self.__imgdir, name, self.__disk_format)
bmap_file = self._full_path(self._outdir, name, "bmap")
msger.debug("Generating block map file '%s'" % bmap_file)
try:
creator = BmapCreate.BmapCreate(image, bmap_file)
creator.generate()
del creator
except BmapCreate.Error as err:
raise CreatorError("Failed to create bmap file: %s" % str(err))
########NEW FILE########
__FILENAME__ = desktop
#!/usr/bin/python -tt
#
# Copyright (c) 2008, 2009, 2010 Intel, Inc.
#
# Yi Yang <yi.y.yang@intel.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
class Mic_Desktop(KickstartCommand):
def __init__(self, writePriority=0,
defaultdesktop=None,
defaultdm=None,
autologinuser=None,
session=None):
KickstartCommand.__init__(self, writePriority)
self.__new_version = False
self.op = self._getParser()
self.defaultdesktop = defaultdesktop
self.autologinuser = autologinuser
self.defaultdm = defaultdm
self.session = session
def __str__(self):
retval = ""
if self.defaultdesktop != None:
retval += " --defaultdesktop=%s" % self.defaultdesktop
if self.session != None:
retval += " --session=\"%s\"" % self.session
if self.autologinuser != None:
retval += " --autologinuser=%s" % self.autologinuser
if self.defaultdm != None:
retval += " --defaultdm=%s" % self.defaultdm
if retval != "":
retval = "# Default Desktop Settings\ndesktop %s\n" % retval
return retval
def _getParser(self):
try:
op = KSOptionParser(lineno=self.lineno)
except TypeError:
# the latest version has not lineno argument
op = KSOptionParser()
self.__new_version = True
op.add_option("--defaultdesktop", dest="defaultdesktop",
action="store",
type="string",
nargs=1)
op.add_option("--autologinuser", dest="autologinuser",
action="store",
type="string",
nargs=1)
op.add_option("--defaultdm", dest="defaultdm",
action="store",
type="string",
nargs=1)
op.add_option("--session", dest="session",
action="store",
type="string",
nargs=1)
return op
def parse(self, args):
if self.__new_version:
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
else:
(opts, extra) = self.op.parse_args(args=args)
if extra:
m = _("Unexpected arguments to %(command)s command: %(options)s") \
% {"command": "desktop", "options": extra}
raise KickstartValueError, formatErrorMsg(self.lineno, msg=m)
self._setToSelf(self.op, opts)
########NEW FILE########
__FILENAME__ = installerfw
#!/usr/bin/python -tt
#
# Copyright (c) 2013 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from pykickstart.base import *
from pykickstart.options import *
class Mic_installerfw(KickstartCommand):
""" This class implements the "installerfw" KS option. The argument
of the option is a comman-separated list of MIC features which have to be
disabled and instead, will be done in the installer. For example,
"installerfw=extlinux" disables all the MIC code which installs extlinux to
the target images, and instead, the extlinux or whatever boot-loader will
be installed by the installer instead.
The installer is a tool which is external to MIC, it comes from the
installation repositories and can be executed by MIC in order to perform
various configuration actions. The main point here is to make sure MIC has
no hard-wired knoledge about the target OS configuration. """
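    # Example (illustrative) kickstart usage, per parse() below:
    #   installerfw extlinux,fstab
    # would make MIC skip its own extlinux installation and fstab generation.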
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, *args, **kwargs):
KickstartCommand.__init__(self, *args, **kwargs)
self.op = self._getParser()
self.features = kwargs.get("installerfw", None)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.features:
retval += "# Enable installer framework features\ninstallerfw\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
(_, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 1:
msg = "Kickstart command \"installerfw\" requires one " \
"argumet - a list of legacy features to disable"
raise KickstartValueError, formatErrorMsg(self.lineno, msg = msg)
self.features = extra[0].split(",")
return self
########NEW FILE########
__FILENAME__ = micboot
#!/usr/bin/python -tt
#
# Copyright (c) 2008, 2009, 2010 Intel, Inc.
#
# Anas Nashif
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
from pykickstart.commands.bootloader import *
class Mic_Bootloader(F8_Bootloader):
def __init__(self, writePriority=10, appendLine="", driveorder=None,
forceLBA=False, location="", md5pass="", password="",
upgrade=False, menus=""):
F8_Bootloader.__init__(self, writePriority, appendLine, driveorder,
forceLBA, location, md5pass, password, upgrade)
self.menus = ""
self.ptable = "msdos"
def _getArgsAsStr(self):
ret = F8_Bootloader._getArgsAsStr(self)
if self.menus == "":
ret += " --menus=%s" %(self.menus,)
if self.ptable:
ret += " --ptable=\"%s\"" %(self.ptable,)
return ret
def _getParser(self):
op = F8_Bootloader._getParser(self)
op.add_option("--menus", dest="menus")
op.add_option("--ptable", dest="ptable", type="string")
return op
########NEW FILE########
__FILENAME__ = micpartition
#!/usr/bin/python -tt
#
# Marko Saukko <marko.saukko@cybercom.com>
#
# Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from pykickstart.commands.partition import *
class Mic_PartData(FC4_PartData):
removedKeywords = FC4_PartData.removedKeywords
removedAttrs = FC4_PartData.removedAttrs
def __init__(self, *args, **kwargs):
FC4_PartData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.align = kwargs.get("align", None)
self.extopts = kwargs.get("extopts", None)
self.part_type = kwargs.get("part_type", None)
def _getArgsAsStr(self):
retval = FC4_PartData._getArgsAsStr(self)
if self.align:
retval += " --align"
if self.extopts:
retval += " --extoptions=%s" % self.extopts
if self.part_type:
retval += " --part-type=%s" % self.part_type
return retval
class Mic_Partition(FC4_Partition):
removedKeywords = FC4_Partition.removedKeywords
removedAttrs = FC4_Partition.removedAttrs
def _getParser(self):
op = FC4_Partition._getParser(self)
        # The alignment value is given in kBytes. e.g., value 8 means that
        # the partition is aligned to start on an 8192 byte boundary.
op.add_option("--align", type="int", action="store", dest="align",
default=None)
op.add_option("--extoptions", type="string", action="store", dest="extopts",
default=None)
op.add_option("--part-type", type="string", action="store", dest="part_type",
default=None)
return op
########NEW FILE########
__FILENAME__ = micrepo
#!/usr/bin/python -tt
#
# Copyright (c) 2008, 2009, 2010 Intel, Inc.
#
# Yi Yang <yi.y.yang@intel.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
from pykickstart.commands.repo import *
class Mic_RepoData(F8_RepoData):
def __init__(self, baseurl="", mirrorlist=None, name="", priority=None,
includepkgs=(), excludepkgs=(), save=False, proxy=None,
proxy_username=None, proxy_password=None, debuginfo=False,
source=False, gpgkey=None, disable=False, ssl_verify="yes",
nocache=False):
kw = {}
# F8_RepoData keywords
if includepkgs:
kw['includepkgs'] = includepkgs
if excludepkgs:
kw['excludepkgs'] = excludepkgs
#FC6_RepoData keywords
if baseurl:
kw['baseurl'] = baseurl
if mirrorlist:
kw['mirrorlist'] = mirrorlist
if name:
kw['name'] = name
F8_RepoData.__init__(self, **kw)
self.save = save
self.proxy = proxy
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.debuginfo = debuginfo
self.disable = disable
self.source = source
self.gpgkey = gpgkey
self.ssl_verify = ssl_verify.lower()
self.priority = priority
self.nocache = nocache
def _getArgsAsStr(self):
retval = F8_RepoData._getArgsAsStr(self)
if self.save:
retval += " --save"
if self.proxy:
retval += " --proxy=%s" % self.proxy
if self.proxy_username:
retval += " --proxyuser=%s" % self.proxy_username
if self.proxy_password:
retval += " --proxypasswd=%s" % self.proxy_password
if self.debuginfo:
retval += " --debuginfo"
if self.source:
retval += " --source"
if self.gpgkey:
retval += " --gpgkey=%s" % self.gpgkey
if self.disable:
retval += " --disable"
if self.ssl_verify:
retval += " --ssl_verify=%s" % self.ssl_verify
if self.priority:
retval += " --priority=%s" % self.priority
if self.nocache:
retval += " --nocache"
return retval
class Mic_Repo(F8_Repo):
def __init__(self, writePriority=0, repoList=None):
F8_Repo.__init__(self, writePriority, repoList)
def __str__(self):
retval = ""
for repo in self.repoList:
retval += repo.__str__()
return retval
def _getParser(self):
def list_cb (option, opt_str, value, parser):
for d in value.split(','):
parser.values.ensure_value(option.dest, []).append(d)
op = F8_Repo._getParser(self)
op.add_option("--save", action="store_true", dest="save",
default=False)
op.add_option("--proxy", type="string", action="store", dest="proxy",
default=None, nargs=1)
op.add_option("--proxyuser", type="string", action="store",
dest="proxy_username", default=None, nargs=1)
op.add_option("--proxypasswd", type="string", action="store",
dest="proxy_password", default=None, nargs=1)
op.add_option("--debuginfo", action="store_true", dest="debuginfo",
default=False)
op.add_option("--source", action="store_true", dest="source",
default=False)
op.add_option("--disable", action="store_true", dest="disable",
default=False)
op.add_option("--gpgkey", type="string", action="store", dest="gpgkey",
default=None, nargs=1)
op.add_option("--ssl_verify", type="string", action="store",
dest="ssl_verify", default="yes")
op.add_option("--priority", type="int", action="store", dest="priority",
default=None)
op.add_option("--nocache", action="store_true", dest="nocache",
default=False)
return op
########NEW FILE########
__FILENAME__ = partition
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This module provides the OpenEmbedded partition object definitions.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import shutil
from pykickstart.commands.partition import *
from mic.utils.oe.misc import *
from mic.kickstart.custom_commands import *
from mic.plugin import pluginmgr
partition_methods = {
"do_stage_partition":None,
"do_prepare_partition":None,
"do_configure_partition":None,
}
class Wic_PartData(Mic_PartData):
removedKeywords = Mic_PartData.removedKeywords
removedAttrs = Mic_PartData.removedAttrs
def __init__(self, *args, **kwargs):
Mic_PartData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.source = kwargs.get("source", None)
self.rootfs = kwargs.get("rootfs-dir", None)
self.source_file = ""
self.size = 0
def _getArgsAsStr(self):
retval = Mic_PartData._getArgsAsStr(self)
if self.source:
retval += " --source=%s" % self.source
if self.rootfs:
retval += " --rootfs-dir=%s" % self.rootfs
return retval
def get_rootfs(self):
"""
        Accessor for the rootfs dir.
"""
return self.rootfs
def set_rootfs(self, rootfs):
"""
        Accessor for the actual rootfs dir, which must be set by source
plugins.
"""
self.rootfs = rootfs
def get_size(self):
"""
Accessor for partition size, 0 or --size before set_size().
"""
return self.size
def set_size(self, size):
"""
Accessor for actual partition size, which must be set by source
plugins.
"""
self.size = size
def set_source_file(self, source_file):
"""
Accessor for source_file, the location of the generated partition
image, which must be set by source plugins.
"""
self.source_file = source_file
def get_extra_block_count(self, current_blocks):
"""
        The --size param is reflected in self.size (in MB), and we already
        have current_blocks (1k) blocks; calculate and return the
        number of (1k) blocks we need to add to get to --size, or 0 if
        we're already there or beyond.
"""
msger.debug("Requested partition size for %s: %d" % \
(self.mountpoint, self.size))
if not self.size:
return 0
requested_blocks = self.size * 1024
msger.debug("Requested blocks %d, current_blocks %d" % \
(requested_blocks, current_blocks))
if requested_blocks > current_blocks:
return requested_blocks - current_blocks
else:
return 0
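    # Worked example (illustrative): with --size 100 (i.e. self.size == 100 MB)
    # and current_blocks == 80000 1k-blocks, requested_blocks is
    # 100 * 1024 == 102400, so get_extra_block_count() returns 22400.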
def prepare(self, cr, cr_workdir, oe_builddir, rootfs_dir, bootimg_dir,
kernel_dir, native_sysroot):
"""
Prepare content for individual partitions, depending on
partition command parameters.
"""
if not self.source:
if self.fstype and self.fstype == "swap":
self.prepare_swap_partition(cr_workdir, oe_builddir,
native_sysroot)
elif self.fstype:
self.prepare_empty_partition(cr_workdir, oe_builddir,
native_sysroot)
return
self._source_methods = pluginmgr.get_source_plugin_methods(self.source, partition_methods)
self._source_methods["do_configure_partition"](self, cr, cr_workdir,
oe_builddir,
bootimg_dir,
kernel_dir,
native_sysroot)
self._source_methods["do_stage_partition"](self, cr, cr_workdir,
oe_builddir,
bootimg_dir, kernel_dir,
native_sysroot)
self._source_methods["do_prepare_partition"](self, cr, cr_workdir,
oe_builddir,
bootimg_dir, kernel_dir, rootfs_dir,
native_sysroot)
def prepare_rootfs_from_fs_image(self, cr_workdir, oe_builddir,
rootfs_dir):
"""
Handle an already-created partition e.g. xxx.ext3
"""
rootfs = oe_builddir
du_cmd = "du -Lbms %s" % rootfs
rc, out = exec_cmd(du_cmd)
rootfs_size = out.split()[0]
self.size = rootfs_size
self.source_file = rootfs
def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot):
"""
Prepare content for a rootfs partition i.e. create a partition
and fill it from a /rootfs dir.
Currently handles ext2/3/4 and btrfs.
"""
pseudo = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
pseudo += "export PSEUDO_LOCALSTATEDIR=%s/../pseudo;" % rootfs_dir
pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir
pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
pseudo += "%s/usr/bin/pseudo " % native_sysroot
if self.fstype.startswith("ext"):
return self.prepare_rootfs_ext(cr_workdir, oe_builddir,
rootfs_dir, native_sysroot,
pseudo)
elif self.fstype.startswith("btrfs"):
return self.prepare_rootfs_btrfs(cr_workdir, oe_builddir,
rootfs_dir, native_sysroot,
pseudo)
def prepare_rootfs_ext(self, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for an ext2/3/4 rootfs partition.
"""
image_rootfs = rootfs_dir
rootfs = "%s/rootfs_%s.%s" % (cr_workdir, self.label ,self.fstype)
du_cmd = "du -ks %s" % image_rootfs
rc, out = exec_cmd(du_cmd)
actual_rootfs_size = int(out.split()[0])
extra_blocks = self.get_extra_block_count(actual_rootfs_size)
if extra_blocks < IMAGE_EXTRA_SPACE:
extra_blocks = IMAGE_EXTRA_SPACE
rootfs_size = actual_rootfs_size + extra_blocks
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, self.mountpoint, rootfs_size))
dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=0 bs=1k" % \
(rootfs, rootfs_size)
rc, out = exec_cmd(dd_cmd)
extra_imagecmd = "-i 8192"
mkfs_cmd = "mkfs.%s -F %s %s -d %s" % \
(self.fstype, extra_imagecmd, rootfs, image_rootfs)
rc, out = exec_native_cmd(pseudo + mkfs_cmd, native_sysroot)
# get the rootfs size in the right units for kickstart (Mb)
du_cmd = "du -Lbms %s" % rootfs
rc, out = exec_cmd(du_cmd)
rootfs_size = out.split()[0]
self.size = rootfs_size
self.source_file = rootfs
return 0
def prepare_rootfs_btrfs(self, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a btrfs rootfs partition.
"""
image_rootfs = rootfs_dir
rootfs = "%s/rootfs_%s.%s" % (cr_workdir, self.label, self.fstype)
du_cmd = "du -ks %s" % image_rootfs
rc, out = exec_cmd(du_cmd)
actual_rootfs_size = int(out.split()[0])
extra_blocks = self.get_extra_block_count(actual_rootfs_size)
if extra_blocks < IMAGE_EXTRA_SPACE:
extra_blocks = IMAGE_EXTRA_SPACE
rootfs_size = actual_rootfs_size + extra_blocks
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, self.mountpoint, rootfs_size))
dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=0 bs=1k" % \
(rootfs, rootfs_size)
rc, out = exec_cmd(dd_cmd)
mkfs_cmd = "mkfs.%s -b %d -r %s %s" % \
(self.fstype, rootfs_size * 1024, image_rootfs, rootfs)
rc, out = exec_native_cmd(pseudo + mkfs_cmd, native_sysroot)
# get the rootfs size in the right units for kickstart (Mb)
du_cmd = "du -Lbms %s" % rootfs
rc, out = exec_cmd(du_cmd)
rootfs_size = out.split()[0]
self.size = rootfs_size
self.source_file = rootfs
def prepare_empty_partition(self, cr_workdir, oe_builddir, native_sysroot):
"""
Prepare an empty partition.
"""
if self.fstype.startswith("ext"):
return self.prepare_empty_partition_ext(cr_workdir, oe_builddir,
native_sysroot)
elif self.fstype.startswith("btrfs"):
return self.prepare_empty_partition_btrfs(cr_workdir, oe_builddir,
native_sysroot)
def prepare_empty_partition_ext(self, cr_workdir, oe_builddir,
native_sysroot):
"""
Prepare an empty ext2/3/4 partition.
"""
fs = "%s/fs.%s" % (cr_workdir, self.fstype)
dd_cmd = "dd if=/dev/zero of=%s bs=1M seek=%d count=0" % \
(fs, self.size)
rc, out = exec_cmd(dd_cmd)
extra_imagecmd = "-i 8192"
mkfs_cmd = "mkfs.%s -F %s %s" % (self.fstype, extra_imagecmd, fs)
rc, out = exec_native_cmd(mkfs_cmd, native_sysroot)
self.source_file = fs
return 0
def prepare_empty_partition_btrfs(self, cr_workdir, oe_builddir,
native_sysroot):
"""
Prepare an empty btrfs partition.
"""
fs = "%s/fs.%s" % (cr_workdir, self.fstype)
dd_cmd = "dd if=/dev/zero of=%s bs=1M seek=%d count=0" % \
(fs, self.size)
rc, out = exec_cmd(dd_cmd)
mkfs_cmd = "mkfs.%s -b %d %s" % (self.fstype, self.size * 1024, rootfs)
rc, out = exec_native_cmd(mkfs_cmd, native_sysroot)
mkfs_cmd = "mkfs.%s -F %s %s" % (self.fstype, extra_imagecmd, fs)
rc, out = exec_native_cmd(mkfs_cmd, native_sysroot)
self.source_file = fs
return 0
def prepare_swap_partition(self, cr_workdir, oe_builddir, native_sysroot):
"""
Prepare a swap partition.
"""
fs = "%s/fs.%s" % (cr_workdir, self.fstype)
dd_cmd = "dd if=/dev/zero of=%s bs=1M seek=%d count=0" % \
(fs, self.size)
rc, out = exec_cmd(dd_cmd)
import uuid
label_str = ""
if self.label:
label_str = "-L %s" % self.label
mkswap_cmd = "mkswap %s -U %s %s" % (label_str, str(uuid.uuid1()), fs)
rc, out = exec_native_cmd(mkswap_cmd, native_sysroot)
self.source_file = fs
return 0
class Wic_Partition(Mic_Partition):
removedKeywords = Mic_Partition.removedKeywords
removedAttrs = Mic_Partition.removedAttrs
def _getParser(self):
op = Mic_Partition._getParser(self)
# use specified source file to fill the partition
# and calculate partition size
op.add_option("--source", type="string", action="store",
dest="source", default=None)
# use specified rootfs path to fill the partition
op.add_option("--rootfs-dir", type="string", action="store",
dest="rootfs", default=None)
return op
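# Example (illustrative) .wks partition lines exercising the extra options
# (plugin names are examples, not guaranteed to exist in a given setup):
#   part /boot --source bootimg-pcbios --ondisk sda --fstype=msdos --label boot
#   part /     --source rootfs --ondisk sda --fstype=ext3 --label platform --align 1024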
########NEW FILE########
__FILENAME__ = wicboot
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This module provides the OpenEmbedded bootloader object definitions.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
from pykickstart.commands.bootloader import *
from mic.kickstart.custom_commands.micboot import *
class Wic_Bootloader(Mic_Bootloader):
def __init__(self, writePriority=10, appendLine="", driveorder=None,
forceLBA=False, location="", md5pass="", password="",
upgrade=False, menus=""):
Mic_Bootloader.__init__(self, writePriority, appendLine, driveorder,
forceLBA, location, md5pass, password, upgrade)
self.source = ""
def _getArgsAsStr(self):
retval = Mic_Bootloader._getArgsAsStr(self)
if self.source:
retval += " --source=%s" % self.source
return retval
def _getParser(self):
op = Mic_Bootloader._getParser(self)
# use specified source plugin to implement bootloader-specific methods
op.add_option("--source", type="string", action="store",
dest="source", default=None)
return op
########NEW FILE########
__FILENAME__ = msger
#!/usr/bin/python -tt
# vim: ai ts=4 sts=4 et sw=4
#
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os,sys
import re
import time
__ALL__ = ['set_mode',
'get_loglevel',
'set_loglevel',
'set_logfile',
'raw',
'debug',
'verbose',
'info',
'warning',
'error',
'ask',
'pause',
]
# COLORs in ANSI
INFO_COLOR = 32 # green
WARN_COLOR = 33 # yellow
ERR_COLOR = 31 # red
ASK_COLOR = 34 # blue
NO_COLOR = 0
PREFIX_RE = re.compile('^<(.*?)>\s*(.*)', re.S)
INTERACTIVE = True
LOG_LEVEL = 1
LOG_LEVELS = {
'quiet': 0,
'normal': 1,
'verbose': 2,
'debug': 3,
'never': 4,
}
LOG_FILE_FP = None
LOG_CONTENT = ''
CATCHERR_BUFFILE_FD = -1
CATCHERR_BUFFILE_PATH = None
CATCHERR_SAVED_2 = -1
def _general_print(head, color, msg = None, stream = None, level = 'normal'):
global LOG_CONTENT
if not stream:
stream = sys.stdout
if LOG_LEVELS[level] > LOG_LEVEL:
# skip
return
# encode raw 'unicode' str to utf8 encoded str
if msg and isinstance(msg, unicode):
msg = msg.encode('utf-8', 'ignore')
errormsg = ''
if CATCHERR_BUFFILE_FD > 0:
size = os.lseek(CATCHERR_BUFFILE_FD , 0, os.SEEK_END)
os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
errormsg = os.read(CATCHERR_BUFFILE_FD, size)
os.ftruncate(CATCHERR_BUFFILE_FD, 0)
# append error msg to LOG
if errormsg:
LOG_CONTENT += errormsg
# append normal msg to LOG
save_msg = msg.strip() if msg else None
if save_msg:
timestr = time.strftime("[%m/%d %H:%M:%S %Z] ", time.localtime())
LOG_CONTENT += timestr + save_msg + '\n'
if errormsg:
_color_print('', NO_COLOR, errormsg, stream, level)
_color_print(head, color, msg, stream, level)
def _color_print(head, color, msg, stream, level):
colored = True
if color == NO_COLOR or \
not stream.isatty() or \
os.getenv('ANSI_COLORS_DISABLED') is not None:
colored = False
if head.startswith('\r'):
# need not \n at last
newline = False
else:
newline = True
if colored:
head = '\033[%dm%s:\033[0m ' %(color, head)
if not newline:
# ESC cmd to clear line
head = '\033[2K' + head
else:
if head:
head += ': '
if head.startswith('\r'):
head = head.lstrip()
newline = True
if msg is not None:
if isinstance(msg, unicode):
msg = msg.encode('utf8', 'ignore')
stream.write('%s%s' % (head, msg))
if newline:
stream.write('\n')
stream.flush()
def _color_perror(head, color, msg, level = 'normal'):
if CATCHERR_BUFFILE_FD > 0:
_general_print(head, color, msg, sys.stdout, level)
else:
_general_print(head, color, msg, sys.stderr, level)
def _split_msg(head, msg):
if isinstance(msg, list):
msg = '\n'.join(map(str, msg))
if msg.startswith('\n'):
# means print \n at first
msg = msg.lstrip()
head = '\n' + head
elif msg.startswith('\r'):
# means print \r at first
msg = msg.lstrip()
head = '\r' + head
m = PREFIX_RE.match(msg)
if m:
head += ' <%s>' % m.group(1)
msg = m.group(2)
return head, msg
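# For example (illustrative): _split_msg('Info', '<fs> mounting /dev/sda1')
# returns ('Info <fs>', 'mounting /dev/sda1'), so a message prefix in angle
# brackets is folded into the printed head.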
def get_loglevel():
return (k for k,v in LOG_LEVELS.items() if v==LOG_LEVEL).next()
def set_loglevel(level):
global LOG_LEVEL
if level not in LOG_LEVELS:
# no effect
return
LOG_LEVEL = LOG_LEVELS[level]
def set_interactive(mode=True):
global INTERACTIVE
if mode:
INTERACTIVE = True
else:
INTERACTIVE = False
def log(msg=''):
# log msg to LOG_CONTENT then save to logfile
global LOG_CONTENT
if msg:
LOG_CONTENT += msg
def raw(msg=''):
_general_print('', NO_COLOR, msg)
def info(msg):
head, msg = _split_msg('Info', msg)
_general_print(head, INFO_COLOR, msg)
def verbose(msg):
head, msg = _split_msg('Verbose', msg)
_general_print(head, INFO_COLOR, msg, level = 'verbose')
def warning(msg):
head, msg = _split_msg('Warning', msg)
_color_perror(head, WARN_COLOR, msg)
def debug(msg):
head, msg = _split_msg('Debug', msg)
_color_perror(head, ERR_COLOR, msg, level = 'debug')
def error(msg):
head, msg = _split_msg('Error', msg)
_color_perror(head, ERR_COLOR, msg)
sys.exit(1)
def ask(msg, default=True):
_general_print('\rQ', ASK_COLOR, '')
try:
if default:
msg += '(Y/n) '
else:
msg += '(y/N) '
if INTERACTIVE:
while True:
repl = raw_input(msg)
if repl.lower() == 'y':
return True
elif repl.lower() == 'n':
return False
elif not repl.strip():
# <Enter>
return default
# else loop
else:
if default:
msg += ' Y'
else:
msg += ' N'
_general_print('', NO_COLOR, msg)
return default
except KeyboardInterrupt:
sys.stdout.write('\n')
sys.exit(2)
def choice(msg, choices, default=0):
if default >= len(choices):
return None
_general_print('\rQ', ASK_COLOR, '')
try:
msg += " [%s] " % '/'.join(choices)
if INTERACTIVE:
while True:
repl = raw_input(msg)
if repl in choices:
return repl
elif not repl.strip():
return choices[default]
else:
msg += choices[default]
_general_print('', NO_COLOR, msg)
return choices[default]
except KeyboardInterrupt:
sys.stdout.write('\n')
sys.exit(2)
def pause(msg=None):
if INTERACTIVE:
_general_print('\rQ', ASK_COLOR, '')
if msg is None:
msg = 'press <ENTER> to continue ...'
raw_input(msg)
def set_logfile(fpath):
global LOG_FILE_FP
def _savelogf():
if LOG_FILE_FP:
fp = open(LOG_FILE_FP, 'w')
fp.write(LOG_CONTENT)
fp.close()
if LOG_FILE_FP is not None:
warning('duplicate log file configuration')
LOG_FILE_FP = fpath
import atexit
atexit.register(_savelogf)
def enable_logstderr(fpath):
global CATCHERR_BUFFILE_FD
global CATCHERR_BUFFILE_PATH
global CATCHERR_SAVED_2
if os.path.exists(fpath):
os.remove(fpath)
CATCHERR_BUFFILE_PATH = fpath
CATCHERR_BUFFILE_FD = os.open(CATCHERR_BUFFILE_PATH, os.O_RDWR|os.O_CREAT)
CATCHERR_SAVED_2 = os.dup(2)
os.dup2(CATCHERR_BUFFILE_FD, 2)
def disable_logstderr():
global CATCHERR_BUFFILE_FD
global CATCHERR_BUFFILE_PATH
global CATCHERR_SAVED_2
raw(msg = None) # flush message buffer and print it.
os.dup2(CATCHERR_SAVED_2, 2)
os.close(CATCHERR_SAVED_2)
os.close(CATCHERR_BUFFILE_FD)
os.unlink(CATCHERR_BUFFILE_PATH)
CATCHERR_BUFFILE_FD = -1
CATCHERR_BUFFILE_PATH = None
CATCHERR_SAVED_2 = -1
########NEW FILE########
__FILENAME__ = plugin
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
from mic import msger
from mic import pluginbase
from mic.utils import errors
from mic.utils.oe.misc import *
__ALL__ = ['PluginMgr', 'pluginmgr']
PLUGIN_TYPES = ["imager", "source"] # TODO "hook"
PLUGIN_DIR = "/lib/mic/plugins" # relative to scripts
SCRIPTS_PLUGIN_DIR = "scripts" + PLUGIN_DIR
class PluginMgr(object):
plugin_dirs = {}
# make the manager class as singleton
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(PluginMgr, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
mic_path = os.path.dirname(__file__)
eos = mic_path.find('scripts') + len('scripts')
scripts_path = mic_path[:eos]
self.scripts_path = scripts_path
self.plugin_dir = scripts_path + PLUGIN_DIR
self.layers_path = None
def _build_plugin_dir_list(self, dl, ptype):
if self.layers_path is None:
self.layers_path = get_bitbake_var("BBLAYERS")
layer_dirs = []
for layer_path in self.layers_path.split():
path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR, ptype)
layer_dirs.append(path)
path = os.path.join(dl, ptype)
layer_dirs.append(path)
return layer_dirs
def append_dirs(self, dirs):
for path in dirs:
self._add_plugindir(path)
# load all the plugins AGAIN
self._load_all()
def _add_plugindir(self, path):
path = os.path.abspath(os.path.expanduser(path))
if not os.path.isdir(path):
msger.debug("Plugin dir is not a directory or does not exist: %s"\
% path)
return
if path not in self.plugin_dirs:
self.plugin_dirs[path] = False
# the value True/False means "loaded"
def _load_all(self):
for (pdir, loaded) in self.plugin_dirs.iteritems():
if loaded: continue
sys.path.insert(0, pdir)
for mod in [x[:-3] for x in os.listdir(pdir) if x.endswith(".py")]:
if mod and mod != '__init__':
if mod in sys.modules:
#self.plugin_dirs[pdir] = True
msger.warning("Module %s already exists, skip" % mod)
else:
try:
pymod = __import__(mod)
self.plugin_dirs[pdir] = True
msger.debug("Plugin module %s:%s imported"\
% (mod, pymod.__file__))
except ImportError, err:
msg = 'Failed to load plugin %s/%s: %s' \
% (os.path.basename(pdir), mod, err)
msger.warning(msg)
del(sys.path[0])
def get_plugins(self, ptype):
""" the return value is dict of name:class pairs """
if ptype not in PLUGIN_TYPES:
            raise errors.CreatorError('%s is not a valid plugin type' % ptype)
plugins_dir = self._build_plugin_dir_list(self.plugin_dir, ptype)
self.append_dirs(plugins_dir)
return pluginbase.get_plugins(ptype)
def get_source_plugin_methods(self, source_name, methods):
"""
The methods param is a dict with the method names to find. On
return, the dict values will be filled in with pointers to the
corresponding methods. If one or more methods are not found,
None is returned.
"""
return_methods = None
for _source_name, klass in self.get_plugins('source').iteritems():
if _source_name == source_name:
for _method_name in methods.keys():
if not hasattr(klass, _method_name):
msger.warning("Unimplemented %s source interface for: %s"\
% (_method_name, _source_name))
return None
func = getattr(klass, _method_name)
methods[_method_name] = func
return_methods = methods
return return_methods
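# Illustrative lookup (the plugin name here is hypothetical):
#   methods = {"do_prepare_partition": None}
#   funcs = PluginMgr().get_source_plugin_methods("bootimg-pcbios", methods)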
pluginmgr = PluginMgr()
########NEW FILE########
__FILENAME__ = pluginbase
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
from mic import msger
from mic.utils import errors
class _Plugin(object):
class __metaclass__(type):
def __init__(cls, name, bases, attrs):
if not hasattr(cls, 'plugins'):
cls.plugins = {}
elif 'mic_plugin_type' in attrs:
if attrs['mic_plugin_type'] not in cls.plugins:
cls.plugins[attrs['mic_plugin_type']] = {}
elif hasattr(cls, 'mic_plugin_type') and 'name' in attrs:
cls.plugins[cls.mic_plugin_type][attrs['name']] = cls
def show_plugins(cls):
for cls in cls.plugins[cls.mic_plugin_type]:
print cls
def get_plugins(cls):
return cls.plugins
class ImagerPlugin(_Plugin):
mic_plugin_type = "imager"
@classmethod
def check_image_exists(self, destdir, apacking=None,
images=(),
release=None):
# if it's a packing file, reset images
if apacking:
images = [apacking]
# release option will override images
if release is not None:
images = [os.path.basename(destdir.rstrip('/'))]
destdir = os.path.dirname(destdir.rstrip('/'))
for name in images:
if not name:
continue
image = os.path.join(destdir, name)
if not os.path.exists(image):
continue
if msger.ask("Target image/dir: %s already exists, "
"clean up and continue?" % image):
if os.path.isdir(image):
shutil.rmtree(image)
else:
os.unlink(image)
else:
raise errors.Abort("Cancled")
def do_create(self):
pass
def do_chroot(self):
pass
class SourcePlugin(_Plugin):
mic_plugin_type = "source"
"""
The methods that can be implemented by --source plugins.
Any methods not implemented in a subclass inherit these.
"""
@classmethod
def do_install_disk(self, disk, disk_name, cr, workdir, oe_builddir,
bootimg_dir, kernel_dir, native_sysroot):
"""
Called after all partitions have been prepared and assembled into a
disk image. This provides a hook to allow finalization of a
disk image e.g. to write an MBR to it.
"""
msger.debug("SourcePlugin: do_install_disk: disk: %s" % disk_name)
@classmethod
def do_stage_partition(self, part, cr, workdir, oe_builddir, bootimg_dir,
kernel_dir, native_sysroot):
"""
Special content staging hook called before do_prepare_partition(),
normally empty.
        Typically, a partition will just use the passed-in parameters, e.g.
        the straight bootimg_dir, etc., but in some cases things need to
        be more tailored, e.g. to use a deploy dir + /boot, etc. This
        hook allows those files to be staged in a customized fashion.
        Note that get_bitbake_var() allows you to access non-standard
        variables that you might want to use for this.
"""
msger.debug("SourcePlugin: do_stage_partition: part: %s" % part)
@classmethod
def do_configure_partition(self, part, cr, cr_workdir, oe_builddir,
bootimg_dir, kernel_dir, native_sysroot):
"""
Called before do_prepare_partition(), typically used to create
custom configuration files for a partition, for example
syslinux or grub config files.
"""
msger.debug("SourcePlugin: do_configure_partition: part: %s" % part)
@classmethod
def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
kernel_dir, rootfs_dir, native_sysroot):
"""
Called to do the actual content population for a partition i.e. it
'prepares' the partition to be incorporated into the image.
"""
msger.debug("SourcePlugin: do_prepare_partition: part: %s" % part)
class BackendPlugin(_Plugin):
mic_plugin_type="backend"
def addRepository(self):
pass
def get_plugins(typen):
ps = ImagerPlugin.get_plugins()
if typen in ps:
return ps[typen]
else:
return None
__all__ = ['ImagerPlugin', 'BackendPlugin', 'SourcePlugin', 'get_plugins']
########NEW FILE########
__FILENAME__ = yumpkgmgr
#!/usr/bin/python -tt
#
# Copyright (c) 2007 Red Hat Inc.
# Copyright (c) 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
import re
import tempfile
import glob
from string import Template
import rpmUtils
import yum
from mic import msger
from mic.kickstart import ksparser
from mic.utils import misc, rpmmisc
from mic.utils.grabber import TextProgress
from mic.utils.proxy import get_proxy_for
from mic.utils.errors import CreatorError
from mic.imager.baseimager import BaseImageCreator
YUMCONF_TEMP = """[main]
installroot=$installroot
cachedir=/var/cache/yum
persistdir=/var/lib/yum
plugins=0
reposdir=
failovermethod=priority
http_caching=packages
sslverify=1
"""
class MyYumRepository(yum.yumRepo.YumRepository):
def __del__(self):
pass
def dirSetup(self):
super(MyYumRepository, self).dirSetup()
# relocate package dir
pkgdir = os.path.join(self.basecachedir, 'packages', self.id)
self.setAttribute('_dir_setup_pkgdir', pkgdir)
self._dirSetupMkdir_p(self.pkgdir)
def _getFile(self, url=None,
relative=None,
local=None,
start=None,
end=None,
copy_local=None,
checkfunc=None,
text=None,
reget='simple',
cache=True,
size=None):
m2c_connection = None
if not self.sslverify:
try:
import M2Crypto
m2c_connection = M2Crypto.SSL.Connection.clientPostConnectionCheck
M2Crypto.SSL.Connection.clientPostConnectionCheck = None
except ImportError, err:
raise CreatorError("%s, please try to install python-m2crypto" % str(err))
proxy = None
if url:
proxy = get_proxy_for(url)
else:
proxy = get_proxy_for(self.urls[0])
if proxy:
self.proxy = str(proxy)
size = int(size) if size else None
rvalue = super(MyYumRepository, self)._getFile(url,
relative,
local,
start,
end,
copy_local,
checkfunc,
text,
reget,
cache,
size)
if m2c_connection and \
not M2Crypto.SSL.Connection.clientPostConnectionCheck:
M2Crypto.SSL.Connection.clientPostConnectionCheck = m2c_connection
return rvalue
from mic.pluginbase import BackendPlugin
class Yum(BackendPlugin, yum.YumBase):
name = 'yum'
def __init__(self, target_arch, instroot, cachedir):
yum.YumBase.__init__(self)
self.cachedir = cachedir
self.instroot = instroot
self.target_arch = target_arch
if self.target_arch:
if not rpmUtils.arch.arches.has_key(self.target_arch):
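                # Teach older rpmUtils about the ARM hard-float arches via a
                # fallback compatibility chain; note that the second
                # 'armv7tnhl' assignment below overwrites the first.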
rpmUtils.arch.arches["armv7hl"] = "noarch"
rpmUtils.arch.arches["armv7tnhl"] = "armv7nhl"
rpmUtils.arch.arches["armv7tnhl"] = "armv7thl"
rpmUtils.arch.arches["armv7thl"] = "armv7hl"
rpmUtils.arch.arches["armv7nhl"] = "armv7hl"
self.arch.setup_arch(self.target_arch)
self.__pkgs_license = {}
self.__pkgs_content = {}
self.__pkgs_vcsinfo = {}
self.install_debuginfo = False
def doFileLogSetup(self, uid, logfile):
# don't do the file log for the livecd as it can lead to open fds
        # being left and an inability to clean up after ourselves
pass
def close(self):
try:
os.unlink(self.confpath)
os.unlink(self.conf.installroot + "/yum.conf")
except:
pass
if self.ts:
self.ts.close()
self._delRepos()
self._delSacks()
yum.YumBase.close(self)
self.closeRpmDB()
if not os.path.exists("/etc/fedora-release") and \
not os.path.exists("/etc/meego-release"):
for i in range(3, os.sysconf("SC_OPEN_MAX")):
try:
os.close(i)
except:
pass
def __del__(self):
pass
def _writeConf(self, confpath, installroot):
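        # Illustrative note on the rendering below: safe_substitute() fills
        # $installroot in YUMCONF_TEMP, e.g.
        #   Template(YUMCONF_TEMP).safe_substitute(installroot='/var/tmp/root')
        # and, unlike substitute(), leaves unknown $placeholders intact
        # instead of raising KeyError.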
conf = Template(YUMCONF_TEMP).safe_substitute(installroot=installroot)
f = file(confpath, "w+")
f.write(conf)
f.close()
os.chmod(confpath, 0644)
def _cleanupRpmdbLocks(self, installroot):
# cleans up temporary files left by bdb so that differing
# versions of rpm don't cause problems
for f in glob.glob(installroot + "/var/lib/rpm/__db*"):
os.unlink(f)
def setup(self):
# create yum.conf
(fn, self.confpath) = tempfile.mkstemp(dir=self.cachedir,
prefix='yum.conf-')
os.close(fn)
self._writeConf(self.confpath, self.instroot)
self._cleanupRpmdbLocks(self.instroot)
# do setup
self.doConfigSetup(fn = self.confpath, root = self.instroot)
self.conf.cache = 0
self.doTsSetup()
self.doRpmDBSetup()
self.doRepoSetup()
self.doSackSetup()
def preInstall(self, pkg):
# FIXME: handle pre-install package
return None
def selectPackage(self, pkg):
"""Select a given package.
Can be specified with name.arch or name*
"""
try:
self.install(pattern = pkg)
return None
except yum.Errors.InstallError:
return "No package(s) available to install"
except yum.Errors.RepoError, e:
            raise CreatorError("Unable to download from repo: %s" % (e,))
except yum.Errors.YumBaseError, e:
raise CreatorError("Unable to install: %s" % (e,))
def deselectPackage(self, pkg):
"""Deselect package. Can be specified as name.arch or name*
"""
sp = pkg.rsplit(".", 2)
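        # Illustrative behavior of the split above (str.rsplit semantics):
        #   'bash.x86_64'.rsplit('.', 2) -> ['bash', 'x86_64']  # name + arch
        #   'bash'.rsplit('.', 2)        -> ['bash']  # no arch part; handled
        #                                             # by the exact/glob
        #                                             # match below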
txmbrs = []
if len(sp) == 2:
txmbrs = self.tsInfo.matchNaevr(name=sp[0], arch=sp[1])
if len(txmbrs) == 0:
exact, match, unmatch = yum.packages.parsePackages(
self.pkgSack.returnPackages(),
[pkg],
casematch=1)
for p in exact + match:
txmbrs.append(p)
if len(txmbrs) > 0:
for x in txmbrs:
self.tsInfo.remove(x.pkgtup)
# we also need to remove from the conditionals
# dict so that things don't get pulled back in as a result
# of them. yes, this is ugly. conditionals should die.
for req, pkgs in self.tsInfo.conditionals.iteritems():
if x in pkgs:
pkgs.remove(x)
self.tsInfo.conditionals[req] = pkgs
else:
msger.warning("No such package %s to remove" %(pkg,))
def selectGroup(self, grp, include = ksparser.GROUP_DEFAULT):
try:
yum.YumBase.selectGroup(self, grp)
if include == ksparser.GROUP_REQUIRED:
for p in grp.default_packages.keys():
self.deselectPackage(p)
elif include == ksparser.GROUP_ALL:
for p in grp.optional_packages.keys():
self.selectPackage(p)
return None
except (yum.Errors.InstallError, yum.Errors.GroupsError), e:
return e
except yum.Errors.RepoError, e:
            raise CreatorError("Unable to download from repo: %s" % (e,))
except yum.Errors.YumBaseError, e:
raise CreatorError("Unable to install: %s" % (e,))
def addRepository(self, name, url = None, mirrorlist = None, proxy = None,
proxy_username = None, proxy_password = None,
inc = None, exc = None, ssl_verify=True, nocache=False,
cost = None, priority=None):
# TODO: Handle priority attribute for repos
def _varSubstitute(option):
# takes a variable and substitutes like yum configs do
option = option.replace("$basearch", rpmUtils.arch.getBaseArch())
option = option.replace("$arch", rpmUtils.arch.getCanonArch())
return option
repo = MyYumRepository(name)
# Set proxy
repo.proxy = proxy
repo.proxy_username = proxy_username
repo.proxy_password = proxy_password
if url:
repo.baseurl.append(_varSubstitute(url))
# check LICENSE files
if not rpmmisc.checkRepositoryEULA(name, repo):
            msger.warning('skipping repo %s: EULA confirmation failed' % name)
return None
if mirrorlist:
repo.mirrorlist = _varSubstitute(mirrorlist)
conf = yum.config.RepoConf()
for k, v in conf.iteritems():
if v or not hasattr(repo, k):
repo.setAttribute(k, v)
repo.sslverify = ssl_verify
repo.cache = not nocache
repo.basecachedir = self.cachedir
repo.base_persistdir = self.conf.persistdir
repo.failovermethod = "priority"
repo.metadata_expire = 0
        # Enable gpg check to detect corrupt packages
repo.gpgcheck = 1
repo.enable()
repo.setup(0)
self.repos.add(repo)
if cost:
repo.cost = cost
msger.verbose('repo: %s was added' % name)
return repo
def installLocal(self, pkg, po=None, updateonly=False):
ts = rpmUtils.transaction.initReadOnlyTransaction()
try:
hdr = rpmUtils.miscutils.hdrFromPackage(ts, pkg)
except rpmUtils.RpmUtilsError, e:
raise yum.Errors.MiscError, \
'Could not open local rpm file: %s: %s' % (pkg, e)
self.deselectPackage(hdr['name'])
yum.YumBase.installLocal(self, pkg, po, updateonly)
def installHasFile(self, file):
provides_pkg = self.whatProvides(file, None, None)
dlpkgs = map(
lambda x: x.po,
filter(
lambda txmbr: txmbr.ts_state in ("i", "u"),
self.tsInfo.getMembers()))
for p in dlpkgs:
for q in provides_pkg:
if (p == q):
return True
return False
def runInstall(self, checksize = 0):
os.environ["HOME"] = "/"
os.environ["LD_PRELOAD"] = ""
try:
(res, resmsg) = self.buildTransaction()
except yum.Errors.RepoError, e:
            raise CreatorError("Unable to download from repo: %s" % (e,))
if res != 2:
raise CreatorError("Failed to build transaction : %s" \
% str.join("\n", resmsg))
dlpkgs = map(
lambda x: x.po,
filter(
lambda txmbr: txmbr.ts_state in ("i", "u"),
self.tsInfo.getMembers()))
        # record each package and its file contents
for pkg in dlpkgs:
pkg_long_name = misc.RPM_FMT % {
'name': pkg.name,
'arch': pkg.arch,
'version': pkg.version,
'release': pkg.release
}
self.__pkgs_content[pkg_long_name] = pkg.files
license = pkg.license
if license in self.__pkgs_license.keys():
self.__pkgs_license[license].append(pkg_long_name)
else:
self.__pkgs_license[license] = [pkg_long_name]
total_count = len(dlpkgs)
cached_count = 0
download_total_size = sum(map(lambda x: int(x.packagesize), dlpkgs))
        msger.info("\nChecking for cached packages ...")
for po in dlpkgs:
local = po.localPkg()
repo = filter(lambda r: r.id == po.repoid, self.repos.listEnabled())[0]
if not repo.cache and os.path.exists(local):
os.unlink(local)
if not os.path.exists(local):
continue
if not self.verifyPkg(local, po, False):
msger.warning("Package %s is damaged: %s" \
% (os.path.basename(local), local))
else:
download_total_size -= int(po.packagesize)
cached_count +=1
cache_avail_size = misc.get_filesystem_avail(self.cachedir)
if cache_avail_size < download_total_size:
            raise CreatorError("Not enough free space for downloading packages.")
# record the total size of installed pkgs
pkgs_total_size = 0L
for x in dlpkgs:
if hasattr(x, 'installedsize'):
pkgs_total_size += int(x.installedsize)
else:
pkgs_total_size += int(x.size)
        # check the needed size before actually downloading and installing
if checksize and pkgs_total_size > checksize:
            raise CreatorError("Not enough free space for installing, "
                               "please resize the partition size in the ks file")
msger.info("Packages: %d Total, %d Cached, %d Missed" \
% (total_count, cached_count, total_count - cached_count))
try:
repos = self.repos.listEnabled()
for repo in repos:
repo.setCallback(TextProgress(total_count - cached_count))
self.downloadPkgs(dlpkgs)
# FIXME: sigcheck?
self.initActionTs()
self.populateTs(keepold=0)
deps = self.ts.check()
if len(deps) != 0:
# This isn't fatal, Ubuntu has this issue but it is ok.
msger.debug(deps)
msger.warning("Dependency check failed!")
rc = self.ts.order()
if rc != 0:
raise CreatorError("ordering packages for installation failed")
# FIXME: callback should be refactored a little in yum
cb = rpmmisc.RPMInstallCallback(self.ts)
cb.tsInfo = self.tsInfo
cb.filelog = False
msger.warning('\nCaution, do NOT interrupt the installation, '
'else mic cannot finish the cleanup.')
installlogfile = "%s/__catched_stderr.buf" % (self.instroot)
msger.enable_logstderr(installlogfile)
self.runTransaction(cb)
self._cleanupRpmdbLocks(self.conf.installroot)
except rpmUtils.RpmUtilsError, e:
raise CreatorError("mic does NOT support delta rpm: %s" % e)
except yum.Errors.RepoError, e:
            raise CreatorError("Unable to download from repo: %s" % e)
except yum.Errors.YumBaseError, e:
raise CreatorError("Unable to install: %s" % e)
finally:
msger.disable_logstderr()
def getVcsInfo(self):
return self.__pkgs_vcsinfo
def getAllContent(self):
return self.__pkgs_content
def getPkgsLicense(self):
return self.__pkgs_license
def getFilelist(self, pkgname):
if not pkgname:
return None
pkg = filter(lambda txmbr: txmbr.po.name == pkgname, self.tsInfo.getMembers())
if not pkg:
return None
return pkg[0].po.filelist
def package_url(self, pkgname):
pkgs = self.pkgSack.searchNevra(name=pkgname)
if pkgs:
proxy = None
proxies = None
url = pkgs[0].remote_url
repoid = pkgs[0].repoid
repos = filter(lambda r: r.id == repoid, self.repos.listEnabled())
if repos:
proxy = repos[0].proxy
if not proxy:
proxy = get_proxy_for(url)
if proxy:
proxies = {str(url.split(':')[0]): str(proxy)}
return (url, proxies)
return (None, None)
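    # Illustrative usage sketch (hypothetical values, not from the original
    # source): package_url() pairs the remote rpm location with a proxies
    # map suitable for urlgrabber-style fetchers, e.g.
    #   url, proxies = backend.package_url('bash')
    #   # -> ('http://repo.example.org/armv7hl/bash-4.1-1.armv7hl.rpm',
    #   #     {'http': 'http://proxy.example.org:8080'})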
########NEW FILE########
__FILENAME__ = zypppkgmgr
#!/usr/bin/python -tt
#
# Copyright (c) 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import urlparse
import rpm
import zypp
if not hasattr(zypp, 'PoolQuery') or \
not hasattr(zypp.RepoManager, 'loadSolvFile'):
raise ImportError("python-zypp in host system cannot support PoolQuery or "
"loadSolvFile interface, please update it to enhanced "
"version which can be found in download.tizen.org/tools")
from mic import msger
from mic.kickstart import ksparser
from mic.utils import misc, rpmmisc, runner, fs_related
from mic.utils.grabber import myurlgrab, TextProgress
from mic.utils.proxy import get_proxy_for
from mic.utils.errors import CreatorError, RepoError, RpmError
from mic.imager.baseimager import BaseImageCreator
class RepositoryStub:
def __init__(self):
self.name = None
self.baseurl = []
self.mirrorlist = None
self.proxy = None
self.proxy_username = None
self.proxy_password = None
self.nocache = False
self.enabled = True
self.autorefresh = True
self.keeppackages = True
self.priority = None
from mic.pluginbase import BackendPlugin
class Zypp(BackendPlugin):
name = 'zypp'
def __init__(self, target_arch, instroot, cachedir):
self.cachedir = cachedir
self.instroot = instroot
self.target_arch = target_arch
self.__pkgs_license = {}
self.__pkgs_content = {}
self.__pkgs_vcsinfo = {}
self.repos = []
self.to_deselect = []
self.localpkgs = {}
self.repo_manager = None
self.repo_manager_options = None
self.Z = None
self.ts = None
self.ts_pre = None
self.incpkgs = {}
self.excpkgs = {}
self.pre_pkgs = []
self.probFilterFlags = [ rpm.RPMPROB_FILTER_OLDPACKAGE,
rpm.RPMPROB_FILTER_REPLACEPKG ]
self.has_prov_query = True
self.install_debuginfo = False
def doFileLogSetup(self, uid, logfile):
# don't do the file log for the livecd as it can lead to open fds
        # being left and an inability to clean up after ourselves
pass
def closeRpmDB(self):
pass
def close(self):
if self.ts:
self.ts.closeDB()
self.ts = None
if self.ts_pre:
self.ts_pre.closeDB()
self.ts = None
self.closeRpmDB()
if not os.path.exists("/etc/fedora-release") and \
not os.path.exists("/etc/meego-release"):
for i in range(3, os.sysconf("SC_OPEN_MAX")):
try:
os.close(i)
except:
pass
def __del__(self):
self.close()
def _cleanupRpmdbLocks(self, installroot):
# cleans up temporary files left by bdb so that differing
# versions of rpm don't cause problems
import glob
for f in glob.glob(installroot + "/var/lib/rpm/__db*"):
os.unlink(f)
def _cleanupZyppJunk(self, installroot):
try:
shutil.rmtree(os.path.join(installroot, '.zypp'))
except:
pass
def setup(self):
self._cleanupRpmdbLocks(self.instroot)
def whatObsolete(self, pkg):
query = zypp.PoolQuery()
query.addKind(zypp.ResKind.package)
query.addAttribute(zypp.SolvAttr.obsoletes, pkg)
query.setMatchExact()
for pi in query.queryResults(self.Z.pool()):
return pi
return None
def _zyppQueryPackage(self, pkg):
query = zypp.PoolQuery()
query.addKind(zypp.ResKind.package)
query.addAttribute(zypp.SolvAttr.name,pkg)
query.setMatchExact()
for pi in query.queryResults(self.Z.pool()):
return pi
return None
def _splitPkgString(self, pkg):
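        # Illustrative splits (assumed, with an 'armv7hl' target arch):
        #   'bash.armv7hl' -> ('bash', 'armv7hl')    # suffix is a compatible arch
        #   'python.zypp'  -> ('python.zypp', None)  # suffix is not an arch,
        #                                            # so it stays in the name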
sp = pkg.rsplit(".",1)
name = sp[0]
arch = None
if len(sp) == 2:
arch = sp[1]
sysarch = zypp.Arch(self.target_arch)
if not zypp.Arch(arch).compatible_with (sysarch):
arch = None
name = ".".join(sp)
return name, arch
def selectPackage(self, pkg):
"""Select a given package or package pattern, can be specified
with name.arch or name* or *name
"""
if not self.Z:
self.__initialize_zypp()
def markPoolItem(obs, pi):
if obs == None:
pi.status().setToBeInstalled (zypp.ResStatus.USER)
else:
obs.status().setToBeInstalled (zypp.ResStatus.USER)
def cmpEVR(p1, p2):
# compare criterion: arch compatibility first, then repo
# priority, and version last
a1 = p1.arch()
a2 = p2.arch()
if str(a1) != str(a2):
if a1.compatible_with(a2):
return -1
else:
return 1
# Priority of a repository is an integer value between 0 (the
# highest priority) and 99 (the lowest priority)
pr1 = int(p1.repoInfo().priority())
pr2 = int(p2.repoInfo().priority())
if pr1 > pr2:
return -1
elif pr1 < pr2:
return 1
ed1 = p1.edition()
ed2 = p2.edition()
(e1, v1, r1) = map(str, [ed1.epoch(), ed1.version(), ed1.release()])
(e2, v2, r2) = map(str, [ed2.epoch(), ed2.version(), ed2.release()])
return rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
found = False
startx = pkg.startswith("*")
endx = pkg.endswith("*")
ispattern = startx or endx
name, arch = self._splitPkgString(pkg)
q = zypp.PoolQuery()
q.addKind(zypp.ResKind.package)
if ispattern:
if startx and not endx:
pattern = '%s$' % (pkg[1:])
if endx and not startx:
pattern = '^%s' % (pkg[0:-1])
if endx and startx:
pattern = '%s' % (pkg[1:-1])
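            # Illustrative glob -> regex mapping built above:
            #   '*gtk'  -> 'gtk$'   (suffix match)
            #   'gtk*'  -> '^gtk'   (prefix match)
            #   '*gtk*' -> 'gtk'    (substring match)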
q.setMatchRegex()
q.addAttribute(zypp.SolvAttr.name,pattern)
elif arch:
q.setMatchExact()
q.addAttribute(zypp.SolvAttr.name,name)
else:
q.setMatchExact()
q.addAttribute(zypp.SolvAttr.name,pkg)
for pitem in sorted(
q.queryResults(self.Z.pool()),
cmp=lambda x,y: cmpEVR(zypp.asKindPackage(x), zypp.asKindPackage(y)),
reverse=True):
item = zypp.asKindPackage(pitem)
if item.name() in self.excpkgs.keys() and \
self.excpkgs[item.name()] == item.repoInfo().name():
continue
if item.name() in self.incpkgs.keys() and \
self.incpkgs[item.name()] != item.repoInfo().name():
continue
found = True
obspkg = self.whatObsolete(item.name())
if arch:
if arch == str(item.arch()):
item.status().setToBeInstalled (zypp.ResStatus.USER)
else:
markPoolItem(obspkg, pitem)
if not ispattern:
break
        # Couldn't match by package name; search the package provides
        # information instead
if found == False and not ispattern:
q.addAttribute(zypp.SolvAttr.provides, pkg)
q.addAttribute(zypp.SolvAttr.name,'')
for pitem in sorted(
q.queryResults(self.Z.pool()),
cmp=lambda x,y: cmpEVR(zypp.asKindPackage(x), zypp.asKindPackage(y)),
reverse=True):
item = zypp.asKindPackage(pitem)
if item.name() in self.excpkgs.keys() and \
self.excpkgs[item.name()] == item.repoInfo().name():
continue
if item.name() in self.incpkgs.keys() and \
self.incpkgs[item.name()] != item.repoInfo().name():
continue
found = True
obspkg = self.whatObsolete(item.name())
markPoolItem(obspkg, pitem)
break
if found:
return None
else:
raise CreatorError("Unable to find package: %s" % (pkg,))
    def inDeselectPackages(self, pitem):
        """Check whether the given package is in the deselect list
        """
item = zypp.asKindPackage(pitem)
name = item.name()
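        # Illustrative matches (assuming an 'armv7hl' target): a '*-doc'
        # entry in to_deselect matches any package whose name ends in
        # '-doc', e.g. 'bash-doc'; a 'bash.armv7hl' entry matches only
        # that exact name/arch pair.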
for pkg in self.to_deselect:
startx = pkg.startswith("*")
endx = pkg.endswith("*")
ispattern = startx or endx
pkgname, pkgarch = self._splitPkgString(pkg)
            if not ispattern:
                if pkgarch:
                    if name == pkgname and str(item.arch()) == pkgarch:
                        return True
                else:
                    if name == pkgname:
                        return True
            else:
                if startx and name.endswith(pkg[1:]):
                    return True
                if endx and name.startswith(pkg[:-1]):
                    return True
        return False
def deselectPackage(self, pkg):
"""collect packages should not be installed"""
self.to_deselect.append(pkg)
def selectGroup(self, grp, include = ksparser.GROUP_DEFAULT):
if not self.Z:
self.__initialize_zypp()
found = False
q=zypp.PoolQuery()
q.addKind(zypp.ResKind.pattern)
for pitem in q.queryResults(self.Z.pool()):
item = zypp.asKindPattern(pitem)
summary = "%s" % item.summary()
name = "%s" % item.name()
if name == grp or summary == grp:
found = True
pitem.status().setToBeInstalled (zypp.ResStatus.USER)
break
if found:
if include == ksparser.GROUP_REQUIRED:
map(
lambda p: self.deselectPackage(p),
grp.default_packages.keys())
return None
else:
raise CreatorError("Unable to find pattern: %s" % (grp,))
def addRepository(self, name,
url = None,
mirrorlist = None,
proxy = None,
proxy_username = None,
proxy_password = None,
inc = None,
exc = None,
ssl_verify = True,
nocache = False,
cost=None,
priority=None):
# TODO: Handle cost attribute for repos
if not self.repo_manager:
self.__initialize_repo_manager()
if not proxy and url:
proxy = get_proxy_for(url)
repo = RepositoryStub()
repo.name = name
repo.id = name
repo.proxy = proxy
repo.proxy_username = proxy_username
repo.proxy_password = proxy_password
repo.ssl_verify = ssl_verify
repo.nocache = nocache
repo.baseurl.append(url)
if inc:
for pkg in inc:
self.incpkgs[pkg] = name
if exc:
for pkg in exc:
self.excpkgs[pkg] = name
# check LICENSE files
if not rpmmisc.checkRepositoryEULA(name, repo):
            msger.warning('skipping repo %s: EULA confirmation failed' % name)
return None
if mirrorlist:
repo.mirrorlist = mirrorlist
        # Enable gpg check to detect corrupt packages
repo.gpgcheck = 1
if priority is not None:
# priority 0 has issue in RepoInfo.setPriority
repo.priority = priority + 1
try:
repo_info = zypp.RepoInfo()
repo_info.setAlias(repo.name)
repo_info.setName(repo.name)
repo_info.setEnabled(repo.enabled)
repo_info.setAutorefresh(repo.autorefresh)
repo_info.setKeepPackages(repo.keeppackages)
baseurl = zypp.Url(repo.baseurl[0])
if not ssl_verify:
baseurl.setQueryParam("ssl_verify", "no")
if proxy:
scheme, host, path, parm, query, frag = urlparse.urlparse(proxy)
proxyinfo = host.split(":")
host = proxyinfo[0]
port = "80"
if len(proxyinfo) > 1:
port = proxyinfo[1]
if proxy.startswith("socks") and len(proxy.rsplit(':', 1)) == 2:
host = proxy.rsplit(':', 1)[0]
port = proxy.rsplit(':', 1)[1]
baseurl.setQueryParam ("proxy", host)
baseurl.setQueryParam ("proxyport", port)
repo.baseurl[0] = baseurl.asCompleteString()
self.repos.append(repo)
repo_info.addBaseUrl(baseurl)
if repo.priority is not None:
repo_info.setPriority(repo.priority)
            # This hack changes the zypp credential file location: the
            # default is $HOME/.zypp, which causes conflicts when
            # installing some basic packages, and there is no proper
            # interface for setting it, so temporarily override $HOME
            homedir = os.environ.get('HOME')
            os.environ['HOME'] = '/'
self.repo_manager.addRepository(repo_info)
# save back the $HOME env
if homedir:
os.environ['HOME'] = homedir
else:
del os.environ['HOME']
self.__build_repo_cache(name)
except RuntimeError, e:
raise CreatorError(str(e))
msger.verbose('repo: %s was added' % name)
return repo
def installHasFile(self, file):
return False
def preInstall(self, pkg):
self.pre_pkgs.append(pkg)
def runInstall(self, checksize = 0):
os.environ["HOME"] = "/"
os.environ["LD_PRELOAD"] = ""
self.buildTransaction()
todo = zypp.GetResolvablesToInsDel(self.Z.pool())
installed_pkgs = todo._toInstall
dlpkgs = []
for pitem in installed_pkgs:
if not zypp.isKindPattern(pitem) and \
not self.inDeselectPackages(pitem):
item = zypp.asKindPackage(pitem)
dlpkgs.append(item)
if not self.install_debuginfo or str(item.arch()) == "noarch":
continue
dipkg = self._zyppQueryPackage("%s-debuginfo" % item.name())
if dipkg:
ditem = zypp.asKindPackage(dipkg)
dlpkgs.append(ditem)
else:
msger.warning("No debuginfo rpm found for: %s" \
% item.name())
        # record each package and its file contents
localpkgs = self.localpkgs.keys()
for pkg in dlpkgs:
license = ''
if pkg.name() in localpkgs:
hdr = rpmmisc.readRpmHeader(self.ts, self.localpkgs[pkg.name()])
pkg_long_name = misc.RPM_FMT % {
'name': hdr['name'],
'arch': hdr['arch'],
'version': hdr['version'],
'release': hdr['release']
}
license = hdr['license']
else:
pkg_long_name = misc.RPM_FMT % {
'name': pkg.name(),
'arch': pkg.arch(),
'version': pkg.edition().version(),
'release': pkg.edition().release()
}
license = pkg.license()
if license in self.__pkgs_license.keys():
self.__pkgs_license[license].append(pkg_long_name)
else:
self.__pkgs_license[license] = [pkg_long_name]
total_count = len(dlpkgs)
cached_count = 0
download_total_size = sum(map(lambda x: int(x.downloadSize()), dlpkgs))
localpkgs = self.localpkgs.keys()
        msger.info("Checking for cached packages ...")
for po in dlpkgs:
# Check if it is cached locally
if po.name() in localpkgs:
cached_count += 1
else:
local = self.getLocalPkgPath(po)
name = str(po.repoInfo().name())
try:
repo = filter(lambda r: r.name == name, self.repos)[0]
except IndexError:
repo = None
nocache = repo.nocache if repo else False
if os.path.exists(local):
if nocache or self.checkPkg(local) !=0:
os.unlink(local)
else:
download_total_size -= int(po.downloadSize())
cached_count += 1
cache_avail_size = misc.get_filesystem_avail(self.cachedir)
if cache_avail_size < download_total_size:
            raise CreatorError("Not enough free space for downloading packages.")
# record the total size of installed pkgs
install_total_size = sum(map(lambda x: int(x.installSize()), dlpkgs))
        # check the needed size before actually downloading and installing
# FIXME: for multiple partitions for loop type, check fails
# skip the check temporarily
#if checksize and install_total_size > checksize:
# raise CreatorError("No enough space used for installing, "
# "please resize partition size in ks file")
download_count = total_count - cached_count
msger.info("Packages: %d Total, %d Cached, %d Missed" \
% (total_count, cached_count, download_count))
try:
if download_count > 0:
msger.info("Downloading packages ...")
self.downloadPkgs(dlpkgs, download_count)
self.installPkgs(dlpkgs)
except (RepoError, RpmError):
raise
except Exception, e:
raise CreatorError("Package installation failed: %s" % (e,))
def getVcsInfo(self):
if self.__pkgs_vcsinfo:
return
if not self.ts:
self.__initialize_transaction()
mi = self.ts.dbMatch()
for hdr in mi:
lname = misc.RPM_FMT % {
'name': hdr['name'],
'arch': hdr['arch'],
'version': hdr['version'],
'release': hdr['release']
}
self.__pkgs_vcsinfo[lname] = hdr['VCS']
return self.__pkgs_vcsinfo
def getAllContent(self):
if self.__pkgs_content:
return self.__pkgs_content
if not self.ts:
self.__initialize_transaction()
mi = self.ts.dbMatch()
for hdr in mi:
lname = misc.RPM_FMT % {
'name': hdr['name'],
'arch': hdr['arch'],
'version': hdr['version'],
'release': hdr['release']
}
self.__pkgs_content[lname] = hdr['FILENAMES']
return self.__pkgs_content
def getPkgsLicense(self):
return self.__pkgs_license
def getFilelist(self, pkgname):
if not pkgname:
return None
if not self.ts:
self.__initialize_transaction()
mi = self.ts.dbMatch('name', pkgname)
for header in mi:
return header['FILENAMES']
def __initialize_repo_manager(self):
if self.repo_manager:
return
# Clean up repo metadata
shutil.rmtree(self.cachedir + "/etc", ignore_errors = True)
shutil.rmtree(self.cachedir + "/solv", ignore_errors = True)
shutil.rmtree(self.cachedir + "/raw", ignore_errors = True)
zypp.KeyRing.setDefaultAccept( zypp.KeyRing.ACCEPT_UNSIGNED_FILE
| zypp.KeyRing.ACCEPT_VERIFICATION_FAILED
| zypp.KeyRing.ACCEPT_UNKNOWNKEY
| zypp.KeyRing.TRUST_KEY_TEMPORARILY
)
self.repo_manager_options = \
zypp.RepoManagerOptions(zypp.Pathname(self.instroot))
self.repo_manager_options.knownReposPath = \
zypp.Pathname(self.cachedir + "/etc/zypp/repos.d")
self.repo_manager_options.repoCachePath = \
zypp.Pathname(self.cachedir)
self.repo_manager_options.repoRawCachePath = \
zypp.Pathname(self.cachedir + "/raw")
self.repo_manager_options.repoSolvCachePath = \
zypp.Pathname(self.cachedir + "/solv")
self.repo_manager_options.repoPackagesCachePath = \
zypp.Pathname(self.cachedir + "/packages")
self.repo_manager = zypp.RepoManager(self.repo_manager_options)
def __build_repo_cache(self, name):
repo = self.repo_manager.getRepositoryInfo(name)
if self.repo_manager.isCached(repo) or not repo.enabled():
return
msger.info('Refreshing repository: %s ...' % name)
self.repo_manager.buildCache(repo, zypp.RepoManager.BuildIfNeeded)
def __initialize_zypp(self):
if self.Z:
return
zconfig = zypp.ZConfig_instance()
# Set system architecture
if self.target_arch:
zconfig.setSystemArchitecture(zypp.Arch(self.target_arch))
msger.info("zypp architecture is <%s>" % zconfig.systemArchitecture())
# repoPackagesCachePath is corrected by this
self.repo_manager = zypp.RepoManager(self.repo_manager_options)
repos = self.repo_manager.knownRepositories()
for repo in repos:
if not repo.enabled():
continue
self.repo_manager.loadFromCache(repo)
self.Z = zypp.ZYppFactory_instance().getZYpp()
self.Z.initializeTarget(zypp.Pathname(self.instroot))
self.Z.target().load()
def buildTransaction(self):
if not self.Z.resolver().resolvePool():
probs = self.Z.resolver().problems()
for problem in probs:
msger.warning("repo problem: %s, %s" \
% (problem.description().decode("utf-8"),
problem.details().decode("utf-8")))
            raise RepoError("found %d resolver problem(s), aborting!" \
                            % len(probs))
def getLocalPkgPath(self, po):
repoinfo = po.repoInfo()
cacheroot = repoinfo.packagesPath()
location= po.location()
rpmpath = str(location.filename())
pkgpath = "%s/%s" % (cacheroot, os.path.basename(rpmpath))
return pkgpath
def installLocal(self, pkg, po=None, updateonly=False):
if not self.ts:
self.__initialize_transaction()
solvfile = "%s/.solv" % (self.cachedir)
rc, out = runner.runtool([fs_related.find_binary_path("rpms2solv"),
pkg])
if rc == 0:
f = open(solvfile, "w+")
f.write(out)
f.close()
warnmsg = self.repo_manager.loadSolvFile(solvfile,
os.path.basename(pkg))
if warnmsg:
msger.warning(warnmsg)
os.unlink(solvfile)
else:
            msger.warning('Cannot get %s solv data.' % pkg)
hdr = rpmmisc.readRpmHeader(self.ts, pkg)
arch = zypp.Arch(hdr['arch'])
sysarch = zypp.Arch(self.target_arch)
if arch.compatible_with (sysarch):
pkgname = hdr['name']
self.localpkgs[pkgname] = pkg
self.selectPackage(pkgname)
msger.info("Marking %s to be installed" % (pkg))
else:
msger.warning("Cannot add package %s to transaction. "
"Not a compatible architecture: %s" \
% (pkg, hdr['arch']))
def downloadPkgs(self, package_objects, count):
localpkgs = self.localpkgs.keys()
progress_obj = TextProgress(count)
for po in package_objects:
if po.name() in localpkgs:
continue
filename = self.getLocalPkgPath(po)
if os.path.exists(filename):
if self.checkPkg(filename) == 0:
continue
dirn = os.path.dirname(filename)
if not os.path.exists(dirn):
os.makedirs(dirn)
url = self.get_url(po)
proxies = self.get_proxies(po)
try:
filename = myurlgrab(url, filename, proxies, progress_obj)
except CreatorError:
self.close()
raise
def preinstallPkgs(self):
if not self.ts_pre:
self.__initialize_transaction()
self.ts_pre.order()
cb = rpmmisc.RPMInstallCallback(self.ts_pre)
cb.headmsg = "Preinstall"
installlogfile = "%s/__catched_stderr.buf" % (self.instroot)
# start to catch stderr output from librpm
msger.enable_logstderr(installlogfile)
errors = self.ts_pre.run(cb.callback, '')
# stop catch
msger.disable_logstderr()
self.ts_pre.closeDB()
self.ts_pre = None
if errors is not None:
if len(errors) == 0:
msger.warning('scriptlet or other non-fatal errors occurred '
'during transaction.')
else:
for e in errors:
msger.warning(e[0])
raise RepoError('Could not run transaction.')
def installPkgs(self, package_objects):
if not self.ts:
self.__initialize_transaction()
# clean rpm lock
self._cleanupRpmdbLocks(self.instroot)
self._cleanupZyppJunk(self.instroot)
# Set filters
probfilter = 0
for flag in self.probFilterFlags:
probfilter |= flag
self.ts.setProbFilter(probfilter)
self.ts_pre.setProbFilter(probfilter)
localpkgs = self.localpkgs.keys()
for po in package_objects:
pkgname = po.name()
if pkgname in localpkgs:
rpmpath = self.localpkgs[pkgname]
else:
rpmpath = self.getLocalPkgPath(po)
if not os.path.exists(rpmpath):
# Maybe it is a local repo
rpmuri = self.get_url(po)
if rpmuri.startswith("file:/"):
rpmpath = rpmuri[5:]
if not os.path.exists(rpmpath):
raise RpmError("Error: %s doesn't exist" % rpmpath)
h = rpmmisc.readRpmHeader(self.ts, rpmpath)
if pkgname in self.pre_pkgs:
msger.verbose("pre-install package added: %s" % pkgname)
self.ts_pre.addInstall(h, rpmpath, 'u')
self.ts.addInstall(h, rpmpath, 'u')
unresolved_dependencies = self.ts.check()
if not unresolved_dependencies:
if self.pre_pkgs:
self.preinstallPkgs()
self.ts.order()
cb = rpmmisc.RPMInstallCallback(self.ts)
installlogfile = "%s/__catched_stderr.buf" % (self.instroot)
# start to catch stderr output from librpm
msger.enable_logstderr(installlogfile)
errors = self.ts.run(cb.callback, '')
# stop catch
msger.disable_logstderr()
self.ts.closeDB()
self.ts = None
if errors is not None:
if len(errors) == 0:
msger.warning('scriptlet or other non-fatal errors occurred '
'during transaction.')
else:
for e in errors:
msger.warning(e[0])
raise RepoError('Could not run transaction.')
else:
for pkg, need, needflags, sense, key in unresolved_dependencies:
package = '-'.join(pkg)
if needflags == rpm.RPMSENSE_LESS:
deppkg = ' < '.join(need)
elif needflags == rpm.RPMSENSE_EQUAL:
deppkg = ' = '.join(need)
elif needflags == rpm.RPMSENSE_GREATER:
deppkg = ' > '.join(need)
else:
deppkg = '-'.join(need)
if sense == rpm.RPMDEP_SENSE_REQUIRES:
msger.warning("[%s] Requires [%s], which is not provided" \
% (package, deppkg))
elif sense == rpm.RPMDEP_SENSE_CONFLICTS:
msger.warning("[%s] Conflicts with [%s]" %(package,deppkg))
raise RepoError("Unresolved dependencies, transaction failed.")
def __initialize_transaction(self):
if not self.ts:
self.ts = rpm.TransactionSet(self.instroot)
# Set to not verify DSA signatures.
self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
if not self.ts_pre:
self.ts_pre = rpm.TransactionSet(self.instroot)
# Just unpack the files, don't run scripts
self.ts_pre.setFlags(rpm.RPMTRANS_FLAG_ALLFILES | rpm.RPMTRANS_FLAG_NOSCRIPTS)
# Set to not verify DSA signatures.
self.ts_pre.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
def checkPkg(self, pkg):
ret = 1
if not os.path.exists(pkg):
return ret
ret = rpmmisc.checkRpmIntegrity('rpm', pkg)
if ret != 0:
msger.warning("package %s is damaged: %s" \
% (os.path.basename(pkg), pkg))
return ret
def _add_prob_flags(self, *flags):
for flag in flags:
if flag not in self.probFilterFlags:
self.probFilterFlags.append(flag)
def get_proxies(self, pobj):
if not pobj:
return None
proxy = None
proxies = None
repoinfo = pobj.repoInfo()
reponame = "%s" % repoinfo.name()
repos = filter(lambda r: r.name == reponame, self.repos)
repourl = str(repoinfo.baseUrls()[0])
if repos:
proxy = repos[0].proxy
if not proxy:
proxy = get_proxy_for(repourl)
if proxy:
proxies = {str(repourl.split(':')[0]): str(proxy)}
return proxies
def get_url(self, pobj):
if not pobj:
return None
name = str(pobj.repoInfo().name())
try:
repo = filter(lambda r: r.name == name, self.repos)[0]
except IndexError:
return None
baseurl = repo.baseurl[0]
index = baseurl.find("?")
if index > -1:
baseurl = baseurl[:index]
location = pobj.location()
location = str(location.filename())
if location.startswith("./"):
location = location[2:]
return os.path.join(baseurl, location)
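    # Illustrative sketch (hypothetical values): get_url() joins the repo
    # baseurl, with any '?...' query stripped, to the package location,
    # e.g. baseurl 'http://repo.example.org/std?ssl_verify=no' plus
    # location './armv7hl/bash-4.1-1.armv7hl.rpm' yields
    # 'http://repo.example.org/std/armv7hl/bash-4.1-1.armv7hl.rpm'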
def package_url(self, pkgname):
def cmpEVR(p1, p2):
ed1 = p1.edition()
ed2 = p2.edition()
(e1, v1, r1) = map(str, [ed1.epoch(), ed1.version(), ed1.release()])
(e2, v2, r2) = map(str, [ed2.epoch(), ed2.version(), ed2.release()])
return rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
if not self.Z:
self.__initialize_zypp()
q = zypp.PoolQuery()
q.addKind(zypp.ResKind.package)
q.setMatchExact()
q.addAttribute(zypp.SolvAttr.name, pkgname)
items = sorted(q.queryResults(self.Z.pool()),
cmp=lambda x,y: cmpEVR(zypp.asKindPackage(x), zypp.asKindPackage(y)),
reverse=True)
if items:
item = zypp.asKindPackage(items[0])
url = self.get_url(item)
proxies = self.get_proxies(item)
return (url, proxies)
return (None, None)
########NEW FILE########
__FILENAME__ =
########NEW FILE########
__FILENAME__ = empty_hook
#!/usr/bin/python
# TODO: plugin base for hooks
########NEW FILE########
__FILENAME__ = direct_plugin
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This implements the 'direct' imager plugin class for 'wic', based
# loosely on the raw imager plugin from 'mic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import os
import shutil
import re
import tempfile
from mic import chroot, msger
from mic.utils import misc, fs_related, errors, runner, cmdln
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.utils.partitionedfs import PartitionedMount
import mic.imager.direct as direct
from mic.pluginbase import ImagerPlugin
class DirectPlugin(ImagerPlugin):
name = 'direct'
@classmethod
def __rootfs_dir_to_dict(self, rootfs_dirs):
"""
        Takes a string of 'connection=dir' pairs separated by
        spaces and returns them as a dict
"""
krootfs_dir = {}
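        # Illustrative mapping (hypothetical values):
        #   'rootfs1=/build/a rootfs2=/build/b'
        #   -> {'rootfs1': '/build/a', 'rootfs2': '/build/b'}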
for rootfs_dir in rootfs_dirs.split(' '):
k, v = rootfs_dir.split('=')
krootfs_dir[k] = v
return krootfs_dir
@classmethod
def do_create(self, subcmd, opts, *args):
"""
Create direct image, called from creator as 'direct' cmd
"""
if len(args) != 9:
            raise errors.Usage("Wrong number of arguments given (expected 9)")
staging_data_dir = args[0]
hdddir = args[1]
native_sysroot = args[2]
kernel_dir = args[3]
bootimg_dir = args[4]
rootfs_dir = args[5]
creatoropts = configmgr.create
ksconf = args[6]
image_output_dir = args[7]
oe_builddir = args[8]
krootfs_dir = self.__rootfs_dir_to_dict(rootfs_dir)
configmgr._ksconf = ksconf
creator = direct.DirectImageCreator(oe_builddir,
image_output_dir,
krootfs_dir,
bootimg_dir,
kernel_dir,
native_sysroot,
hdddir,
staging_data_dir,
creatoropts,
None,
None,
None)
try:
creator.mount(None, creatoropts["cachedir"])
creator.install()
creator.configure(creatoropts["repomd"])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
return 0
########NEW FILE########
__FILENAME__ = fs_plugin
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import sys
from mic import chroot, msger, rt_util
from mic.utils import cmdln, misc, errors, fs_related
from mic.imager import fs
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.pluginbase import ImagerPlugin
class FsPlugin(ImagerPlugin):
name = 'fs'
@classmethod
@cmdln.option("--include-src",
dest="include_src",
action="store_true",
default=False,
help="Generate a image with source rpms included")
def do_create(self, subcmd, opts, *args):
"""${cmd_name}: create fs image
Usage:
${name} ${cmd_name} <ksfile> [OPTS]
${cmd_option_list}
"""
if len(args) != 1:
raise errors.Usage("Extra arguments given")
creatoropts = configmgr.create
ksconf = args[0]
if creatoropts['runtime'] == 'bootstrap':
configmgr._ksconf = ksconf
rt_util.bootstrap_mic()
recording_pkgs = []
if len(creatoropts['record_pkgs']) > 0:
recording_pkgs = creatoropts['record_pkgs']
if creatoropts['release'] is not None:
if 'name' not in recording_pkgs:
recording_pkgs.append('name')
if 'vcs' not in recording_pkgs:
recording_pkgs.append('vcs')
configmgr._ksconf = ksconf
        # Called after setting configmgr._ksconf, as creatoropts['name'] is reset there.
if creatoropts['release'] is not None:
creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])
# try to find the pkgmgr
pkgmgr = None
backends = pluginmgr.get_plugins('backend')
if 'auto' == creatoropts['pkgmgr']:
for key in configmgr.prefer_backends:
if key in backends:
pkgmgr = backends[key]
break
else:
for key in backends.keys():
if key == creatoropts['pkgmgr']:
pkgmgr = backends[key]
break
if not pkgmgr:
raise errors.CreatorError("Can't find backend: %s, "
"available choices: %s" %
(creatoropts['pkgmgr'],
','.join(backends.keys())))
creator = fs.FsImageCreator(creatoropts, pkgmgr)
creator._include_src = opts.include_src
if len(recording_pkgs) > 0:
creator._recording_pkgs = recording_pkgs
self.check_image_exists(creator.destdir,
creator.pack_to,
[creator.name],
creatoropts['release'])
try:
creator.check_depend_tools()
creator.mount(None, creatoropts["cachedir"])
creator.install()
            # Download the source packages (private --include-src option)
if opts.include_src:
installed_pkgs = creator.get_installed_packages()
msger.info('--------------------------------------------------')
msger.info('Generating the image with source rpms included ...')
if not misc.SrcpkgsDownload(installed_pkgs, creatoropts["repomd"], creator._instroot, creatoropts["cachedir"]):
msger.warning("Source packages can't be downloaded")
creator.configure(creatoropts["repomd"])
creator.copy_kernel()
creator.unmount()
creator.package(creatoropts["outdir"])
if creatoropts['release'] is not None:
creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
msger.info("Finished.")
return 0
@classmethod
    def do_chroot(self, target, cmd=[]):  # chroot.py parses opts & args
try:
if len(cmd) != 0:
cmdline = ' '.join(cmd)
else:
cmdline = "/bin/bash"
envcmd = fs_related.find_binary_inchroot("env", target)
if envcmd:
cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
chroot.chroot(target, None, cmdline)
finally:
chroot.cleanup_after_chroot("dir", None, None, None)
return 1
########NEW FILE########
__FILENAME__ = livecd_plugin
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import tempfile
from mic import chroot, msger, rt_util
from mic.utils import misc, fs_related, errors
from mic.conf import configmgr
import mic.imager.livecd as livecd
from mic.plugin import pluginmgr
from mic.pluginbase import ImagerPlugin
class LiveCDPlugin(ImagerPlugin):
name = 'livecd'
@classmethod
def do_create(self, subcmd, opts, *args):
"""${cmd_name}: create livecd image
Usage:
${name} ${cmd_name} <ksfile> [OPTS]
${cmd_option_list}
"""
if len(args) != 1:
raise errors.Usage("Extra arguments given")
creatoropts = configmgr.create
ksconf = args[0]
if creatoropts['runtime'] == 'bootstrap':
configmgr._ksconf = ksconf
rt_util.bootstrap_mic()
if creatoropts['arch'] and creatoropts['arch'].startswith('arm'):
            msger.warning('livecd does not support ARM images, quitting')
return
recording_pkgs = []
if len(creatoropts['record_pkgs']) > 0:
recording_pkgs = creatoropts['record_pkgs']
if creatoropts['release'] is not None:
if 'name' not in recording_pkgs:
recording_pkgs.append('name')
if 'vcs' not in recording_pkgs:
recording_pkgs.append('vcs')
configmgr._ksconf = ksconf
        # Called after setting configmgr._ksconf, as creatoropts['name'] is reset there.
if creatoropts['release'] is not None:
creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])
# try to find the pkgmgr
pkgmgr = None
backends = pluginmgr.get_plugins('backend')
if 'auto' == creatoropts['pkgmgr']:
for key in configmgr.prefer_backends:
if key in backends:
pkgmgr = backends[key]
break
else:
for key in backends.keys():
if key == creatoropts['pkgmgr']:
pkgmgr = backends[key]
break
if not pkgmgr:
raise errors.CreatorError("Can't find backend: %s, "
"available choices: %s" %
(creatoropts['pkgmgr'],
','.join(backends.keys())))
creator = livecd.LiveCDImageCreator(creatoropts, pkgmgr)
if len(recording_pkgs) > 0:
creator._recording_pkgs = recording_pkgs
self.check_image_exists(creator.destdir,
creator.pack_to,
[creator.name + ".iso"],
creatoropts['release'])
try:
creator.check_depend_tools()
creator.mount(None, creatoropts["cachedir"])
creator.install()
creator.configure(creatoropts["repomd"])
creator.copy_kernel()
creator.unmount()
creator.package(creatoropts["outdir"])
if creatoropts['release'] is not None:
creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
msger.info("Finished.")
return 0
@classmethod
def do_chroot(cls, target, cmd=[]):
os_image = cls.do_unpack(target)
os_image_dir = os.path.dirname(os_image)
# unpack image to target dir
imgsize = misc.get_file_size(os_image) * 1024L * 1024L
imgtype = misc.get_image_type(os_image)
if imgtype == "btrfsimg":
fstype = "btrfs"
myDiskMount = fs_related.BtrfsDiskMount
elif imgtype in ("ext3fsimg", "ext4fsimg"):
fstype = imgtype[:4]
myDiskMount = fs_related.ExtDiskMount
else:
            raise errors.CreatorError("Unsupported filesystem type: %s" % imgtype)
extmnt = misc.mkdtemp()
extloop = myDiskMount(fs_related.SparseLoopbackDisk(os_image, imgsize),
extmnt,
fstype,
4096,
"%s label" % fstype)
try:
extloop.mount()
except errors.MountError:
extloop.cleanup()
shutil.rmtree(extmnt, ignore_errors = True)
shutil.rmtree(os_image_dir, ignore_errors = True)
raise
try:
if len(cmd) != 0:
cmdline = ' '.join(cmd)
else:
cmdline = "/bin/bash"
envcmd = fs_related.find_binary_inchroot("env", extmnt)
if envcmd:
cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
chroot.chroot(extmnt, None, cmdline)
except:
raise errors.CreatorError("Failed to chroot to %s." %target)
finally:
chroot.cleanup_after_chroot("img", extloop, os_image_dir, extmnt)
@classmethod
def do_pack(cls, base_on):
import subprocess
def __mkinitrd(instance):
kernelver = instance._get_kernel_versions().values()[0][0]
args = [ "/usr/libexec/mkliveinitrd", "/boot/initrd-%s.img" % kernelver, "%s" % kernelver ]
try:
subprocess.call(args, preexec_fn = instance._chroot)
except OSError, (err, msg):
raise errors.CreatorError("Failed to execute /usr/libexec/mkliveinitrd: %s" % msg)
def __run_post_cleanups(instance):
kernelver = instance._get_kernel_versions().values()[0][0]
args = ["rm", "-f", "/boot/initrd-%s.img" % kernelver]
try:
subprocess.call(args, preexec_fn = instance._chroot)
except OSError, (err, msg):
raise errors.CreatorError("Failed to run post cleanups: %s" % msg)
convertoropts = configmgr.convert
convertoropts['name'] = os.path.splitext(os.path.basename(base_on))[0]
convertor = livecd.LiveCDImageCreator(convertoropts)
imgtype = misc.get_image_type(base_on)
if imgtype == "btrfsimg":
fstype = "btrfs"
elif imgtype in ("ext3fsimg", "ext4fsimg"):
fstype = imgtype[:4]
else:
            raise errors.CreatorError("Unsupported filesystem type: %s" % imgtype)
convertor._set_fstype(fstype)
try:
convertor.mount(base_on)
__mkinitrd(convertor)
convertor._create_bootconfig()
__run_post_cleanups(convertor)
convertor.launch_shell(convertoropts['shell'])
convertor.unmount()
convertor.package()
convertor.print_outimage_info()
finally:
shutil.rmtree(os.path.dirname(base_on), ignore_errors = True)
@classmethod
def do_unpack(cls, srcimg):
img = srcimg
imgmnt = misc.mkdtemp()
imgloop = fs_related.DiskMount(fs_related.LoopbackDisk(img, 0), imgmnt)
try:
imgloop.mount()
except errors.MountError:
imgloop.cleanup()
raise
# legacy LiveOS filesystem layout support, remove for F9 or F10
if os.path.exists(imgmnt + "/squashfs.img"):
squashimg = imgmnt + "/squashfs.img"
else:
squashimg = imgmnt + "/LiveOS/squashfs.img"
tmpoutdir = misc.mkdtemp()
        # unsquashfs requires that the output dir not exist
shutil.rmtree(tmpoutdir, ignore_errors = True)
misc.uncompress_squashfs(squashimg, tmpoutdir)
try:
# legacy LiveOS filesystem layout support, remove for F9 or F10
if os.path.exists(tmpoutdir + "/os.img"):
os_image = tmpoutdir + "/os.img"
else:
os_image = tmpoutdir + "/LiveOS/ext3fs.img"
if not os.path.exists(os_image):
                raise errors.CreatorError("'%s' is not a valid live CD ISO: neither "
                                          "LiveOS/ext3fs.img nor os.img exist" % img)
imgname = os.path.basename(srcimg)
imgname = os.path.splitext(imgname)[0] + ".img"
rtimage = os.path.join(tempfile.mkdtemp(dir = "/var/tmp", prefix = "tmp"), imgname)
shutil.copyfile(os_image, rtimage)
finally:
imgloop.cleanup()
shutil.rmtree(tmpoutdir, ignore_errors = True)
shutil.rmtree(imgmnt, ignore_errors = True)
return rtimage
########NEW FILE########
__FILENAME__ = liveusb_plugin
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import tempfile
from mic import chroot, msger, rt_util
from mic.utils import misc, fs_related, errors
from mic.utils.partitionedfs import PartitionedMount
from mic.conf import configmgr
from mic.plugin import pluginmgr
import mic.imager.liveusb as liveusb
from mic.pluginbase import ImagerPlugin
class LiveUSBPlugin(ImagerPlugin):
name = 'liveusb'
@classmethod
def do_create(self, subcmd, opts, *args):
"""${cmd_name}: create liveusb image
Usage:
${name} ${cmd_name} <ksfile> [OPTS]
${cmd_option_list}
"""
if len(args) != 1:
raise errors.Usage("Extra arguments given")
creatoropts = configmgr.create
ksconf = args[0]
if creatoropts['runtime'] == "bootstrap":
configmgr._ksconf = ksconf
rt_util.bootstrap_mic()
if creatoropts['arch'] and creatoropts['arch'].startswith('arm'):
            msger.warning('liveusb does not support ARM images, quitting')
return
recording_pkgs = []
if len(creatoropts['record_pkgs']) > 0:
recording_pkgs = creatoropts['record_pkgs']
if creatoropts['release'] is not None:
if 'name' not in recording_pkgs:
recording_pkgs.append('name')
if 'vcs' not in recording_pkgs:
recording_pkgs.append('vcs')
configmgr._ksconf = ksconf
        # Called after setting configmgr._ksconf, as creatoropts['name'] is reset there.
if creatoropts['release'] is not None:
creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])
# try to find the pkgmgr
pkgmgr = None
backends = pluginmgr.get_plugins('backend')
if 'auto' == creatoropts['pkgmgr']:
for key in configmgr.prefer_backends:
if key in backends:
pkgmgr = backends[key]
break
else:
for key in backends.keys():
if key == creatoropts['pkgmgr']:
pkgmgr = backends[key]
break
if not pkgmgr:
raise errors.CreatorError("Can't find backend: %s, "
"available choices: %s" %
(creatoropts['pkgmgr'],
','.join(backends.keys())))
creator = liveusb.LiveUSBImageCreator(creatoropts, pkgmgr)
if len(recording_pkgs) > 0:
creator._recording_pkgs = recording_pkgs
self.check_image_exists(creator.destdir,
creator.pack_to,
[creator.name + ".usbimg"],
creatoropts['release'])
try:
creator.check_depend_tools()
creator.mount(None, creatoropts["cachedir"])
creator.install()
creator.configure(creatoropts["repomd"])
creator.copy_kernel()
creator.unmount()
creator.package(creatoropts["outdir"])
if creatoropts['release'] is not None:
creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
msger.info("Finished.")
return 0
@classmethod
def do_chroot(cls, target, cmd=[]):
os_image = cls.do_unpack(target)
os_image_dir = os.path.dirname(os_image)
# unpack image to target dir
imgsize = misc.get_file_size(os_image) * 1024L * 1024L
imgtype = misc.get_image_type(os_image)
if imgtype == "btrfsimg":
fstype = "btrfs"
myDiskMount = fs_related.BtrfsDiskMount
elif imgtype in ("ext3fsimg", "ext4fsimg"):
fstype = imgtype[:4]
myDiskMount = fs_related.ExtDiskMount
else:
            raise errors.CreatorError("Unsupported filesystem type: %s" % imgtype)
extmnt = misc.mkdtemp()
extloop = myDiskMount(fs_related.SparseLoopbackDisk(os_image, imgsize),
extmnt,
fstype,
4096,
"%s label" % fstype)
try:
extloop.mount()
except errors.MountError:
extloop.cleanup()
shutil.rmtree(extmnt, ignore_errors = True)
raise
try:
if len(cmd) != 0:
cmdline = ' '.join(cmd)
else:
cmdline = "/bin/bash"
envcmd = fs_related.find_binary_inchroot("env", extmnt)
if envcmd:
cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
chroot.chroot(extmnt, None, cmdline)
except:
raise errors.CreatorError("Failed to chroot to %s." %target)
finally:
chroot.cleanup_after_chroot("img", extloop, os_image_dir, extmnt)
@classmethod
def do_pack(cls, base_on):
import subprocess
def __mkinitrd(instance):
kernelver = instance._get_kernel_versions().values()[0][0]
args = [ "/usr/libexec/mkliveinitrd", "/boot/initrd-%s.img" % kernelver, "%s" % kernelver ]
try:
subprocess.call(args, preexec_fn = instance._chroot)
except OSError, (err, msg):
raise errors.CreatorError("Failed to execute /usr/libexec/mkliveinitrd: %s" % msg)
def __run_post_cleanups(instance):
kernelver = instance._get_kernel_versions().values()[0][0]
args = ["rm", "-f", "/boot/initrd-%s.img" % kernelver]
try:
subprocess.call(args, preexec_fn = instance._chroot)
except OSError, (err, msg):
raise errors.CreatorError("Failed to run post cleanups: %s" % msg)
convertoropts = configmgr.convert
convertoropts['name'] = os.path.splitext(os.path.basename(base_on))[0]
convertor = liveusb.LiveUSBImageCreator(convertoropts)
imgtype = misc.get_image_type(base_on)
if imgtype == "btrfsimg":
fstype = "btrfs"
elif imgtype in ("ext3fsimg", "ext4fsimg"):
fstype = imgtype[:4]
else:
            raise errors.CreatorError("Unsupported filesystem type: %s" % imgtype)
convertor._set_fstype(fstype)
try:
convertor.mount(base_on)
__mkinitrd(convertor)
convertor._create_bootconfig()
__run_post_cleanups(convertor)
convertor.launch_shell(convertoropts['shell'])
convertor.unmount()
convertor.package()
convertor.print_outimage_info()
finally:
shutil.rmtree(os.path.dirname(base_on), ignore_errors = True)
@classmethod
def do_unpack(cls, srcimg):
img = srcimg
imgsize = misc.get_file_size(img) * 1024L * 1024L
imgmnt = misc.mkdtemp()
disk = fs_related.SparseLoopbackDisk(img, imgsize)
imgloop = PartitionedMount(imgmnt, skipformat = True)
imgloop.add_disk('/dev/sdb', disk)
imgloop.add_partition(imgsize/1024/1024, "/dev/sdb", "/", "vfat", boot=False)
try:
imgloop.mount()
except errors.MountError:
imgloop.cleanup()
raise
# legacy LiveOS filesystem layout support, remove for F9 or F10
if os.path.exists(imgmnt + "/squashfs.img"):
squashimg = imgmnt + "/squashfs.img"
else:
squashimg = imgmnt + "/LiveOS/squashfs.img"
tmpoutdir = misc.mkdtemp()
        # unsquashfs requires that the output dir not exist
shutil.rmtree(tmpoutdir, ignore_errors = True)
misc.uncompress_squashfs(squashimg, tmpoutdir)
try:
# legacy LiveOS filesystem layout support, remove for F9 or F10
if os.path.exists(tmpoutdir + "/os.img"):
os_image = tmpoutdir + "/os.img"
else:
os_image = tmpoutdir + "/LiveOS/ext3fs.img"
if not os.path.exists(os_image):
                raise errors.CreatorError("'%s' is not a valid live CD ISO: neither "
                                          "LiveOS/ext3fs.img nor os.img exist" % img)
imgname = os.path.basename(srcimg)
imgname = os.path.splitext(imgname)[0] + ".img"
rtimage = os.path.join(tempfile.mkdtemp(dir = "/var/tmp", prefix = "tmp"), imgname)
shutil.copyfile(os_image, rtimage)
finally:
imgloop.cleanup()
shutil.rmtree(tmpoutdir, ignore_errors = True)
shutil.rmtree(imgmnt, ignore_errors = True)
return rtimage
########NEW FILE########
__FILENAME__ = loop_plugin
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import tempfile
from mic import chroot, msger, rt_util
from mic.utils import misc, fs_related, errors, cmdln
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.imager.loop import LoopImageCreator, load_mountpoints
from mic.pluginbase import ImagerPlugin
class LoopPlugin(ImagerPlugin):
name = 'loop'
@classmethod
@cmdln.option("--compress-disk-image", dest="compress_image",
type='choice', choices=("gz", "bz2"), default=None,
help="Same with --compress-image")
# alias to compress-image for compatibility
@cmdln.option("--compress-image", dest="compress_image",
type='choice', choices=("gz", "bz2"), default=None,
help="Compress all loop images with 'gz' or 'bz2'")
@cmdln.option("--shrink", action='store_true', default=False,
help="Whether to shrink loop images to minimal size")
def do_create(self, subcmd, opts, *args):
"""${cmd_name}: create loop image
Usage:
${name} ${cmd_name} <ksfile> [OPTS]
${cmd_option_list}
"""
if len(args) != 1:
raise errors.Usage("Extra arguments given")
creatoropts = configmgr.create
ksconf = args[0]
if creatoropts['runtime'] == "bootstrap":
configmgr._ksconf = ksconf
rt_util.bootstrap_mic()
recording_pkgs = []
if len(creatoropts['record_pkgs']) > 0:
recording_pkgs = creatoropts['record_pkgs']
if creatoropts['release'] is not None:
if 'name' not in recording_pkgs:
recording_pkgs.append('name')
if 'vcs' not in recording_pkgs:
recording_pkgs.append('vcs')
configmgr._ksconf = ksconf
# Called after setting configmgr._ksconf,
# as the creatoropts['name'] is reset there.
if creatoropts['release'] is not None:
creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'],
creatoropts['release'],
creatoropts['name'])
# try to find the pkgmgr
pkgmgr = None
backends = pluginmgr.get_plugins('backend')
if 'auto' == creatoropts['pkgmgr']:
for key in configmgr.prefer_backends:
if key in backends:
pkgmgr = backends[key]
break
else:
for key in backends.keys():
if key == creatoropts['pkgmgr']:
pkgmgr = backends[key]
break
if not pkgmgr:
raise errors.CreatorError("Can't find backend: %s, "
"available choices: %s" %
(creatoropts['pkgmgr'],
','.join(backends.keys())))
creator = LoopImageCreator(creatoropts,
pkgmgr,
opts.compress_image,
opts.shrink)
if len(recording_pkgs) > 0:
creator._recording_pkgs = recording_pkgs
image_names = [creator.name + ".img"]
image_names.extend(creator.get_image_names())
self.check_image_exists(creator.destdir,
creator.pack_to,
image_names,
creatoropts['release'])
try:
creator.check_depend_tools()
creator.mount(None, creatoropts["cachedir"])
creator.install()
creator.configure(creatoropts["repomd"])
creator.copy_kernel()
creator.unmount()
creator.package(creatoropts["outdir"])
if creatoropts['release'] is not None:
creator.release_output(ksconf,
creatoropts['outdir'],
creatoropts['release'])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
msger.info("Finished.")
return 0
@classmethod
def _do_chroot_tar(cls, target, cmd=[]):
mountfp_xml = os.path.splitext(target)[0] + '.xml'
if not os.path.exists(mountfp_xml):
raise errors.CreatorError("No mount point file found for this tar "
"image, please check %s" % mountfp_xml)
import tarfile
tar = tarfile.open(target, 'r')
tmpdir = misc.mkdtemp()
tar.extractall(path=tmpdir)
tar.close()
mntdir = misc.mkdtemp()
loops = []
for (mp, label, name, size, fstype) in load_mountpoints(mountfp_xml):
if fstype in ("ext2", "ext3", "ext4"):
myDiskMount = fs_related.ExtDiskMount
elif fstype == "btrfs":
myDiskMount = fs_related.BtrfsDiskMount
elif fstype in ("vfat", "msdos"):
myDiskMount = fs_related.VfatDiskMount
else:
msger.error("Cannot support fstype: %s" % fstype)
name = os.path.join(tmpdir, name)
size = size * 1024L * 1024L
loop = myDiskMount(fs_related.SparseLoopbackDisk(name, size),
os.path.join(mntdir, mp.lstrip('/')),
fstype, size, label)
try:
msger.verbose("Mount %s to %s" % (mp, mntdir + mp))
fs_related.makedirs(os.path.join(mntdir, mp.lstrip('/')))
loop.mount()
except:
loop.cleanup()
for lp in reversed(loops):
chroot.cleanup_after_chroot("img", lp, None, mntdir)
shutil.rmtree(tmpdir, ignore_errors=True)
raise
loops.append(loop)
try:
if len(cmd) != 0:
cmdline = "/usr/bin/env HOME=/root " + ' '.join(cmd)
else:
cmdline = "/usr/bin/env HOME=/root /bin/bash"
chroot.chroot(mntdir, None, cmdline)
except:
raise errors.CreatorError("Failed to chroot to %s." % target)
finally:
for loop in reversed(loops):
chroot.cleanup_after_chroot("img", loop, None, mntdir)
shutil.rmtree(tmpdir, ignore_errors=True)
@classmethod
def do_chroot(cls, target, cmd=[]):
if target.endswith('.tar'):
import tarfile
if tarfile.is_tarfile(target):
LoopPlugin._do_chroot_tar(target, cmd)
return
else:
raise errors.CreatorError("damaged tarball for loop images")
img = target
imgsize = misc.get_file_size(img) * 1024L * 1024L
imgtype = misc.get_image_type(img)
if imgtype == "btrfsimg":
fstype = "btrfs"
myDiskMount = fs_related.BtrfsDiskMount
elif imgtype in ("ext3fsimg", "ext4fsimg"):
fstype = imgtype[:4]
myDiskMount = fs_related.ExtDiskMount
else:
raise errors.CreatorError("Unsupported filesystem type: %s" \
% imgtype)
extmnt = misc.mkdtemp()
extloop = myDiskMount(fs_related.SparseLoopbackDisk(img, imgsize),
extmnt,
fstype,
4096,
"%s label" % fstype)
try:
extloop.mount()
except errors.MountError:
extloop.cleanup()
shutil.rmtree(extmnt, ignore_errors=True)
raise
try:
if len(cmd) != 0:
cmdline = ' '.join(cmd)
else:
cmdline = "/bin/bash"
envcmd = fs_related.find_binary_inchroot("env", extmnt)
if envcmd:
cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
chroot.chroot(extmnt, None, cmdline)
except:
raise errors.CreatorError("Failed to chroot to %s." % img)
finally:
chroot.cleanup_after_chroot("img", extloop, None, extmnt)
@classmethod
def do_unpack(cls, srcimg):
image = os.path.join(tempfile.mkdtemp(dir="/var/tmp", prefix="tmp"),
"target.img")
msger.info("Copying file system ...")
shutil.copyfile(srcimg, image)
return image
########NEW FILE########
__FILENAME__ = raw_plugin
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import re
import tempfile
from mic import chroot, msger, rt_util
from mic.utils import misc, fs_related, errors, runner, cmdln
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.utils.partitionedfs import PartitionedMount
import mic.imager.raw as raw
from mic.pluginbase import ImagerPlugin
class RawPlugin(ImagerPlugin):
name = 'raw'
@classmethod
@cmdln.option("--compress-disk-image", dest="compress_image", type='choice',
choices=("gz", "bz2"), default=None,
help="Same with --compress-image")
@cmdln.option("--compress-image", dest="compress_image", type='choice',
choices=("gz", "bz2"), default = None,
help="Compress all raw images before package")
@cmdln.option("--generate-bmap", action="store_true", default = None,
help="also generate the block map file")
@cmdln.option("--fstab-entry", dest="fstab_entry", type='choice',
choices=("name", "uuid"), default="uuid",
help="Set fstab entry, 'name' means using device names, "
"'uuid' means using filesystem uuid")
def do_create(self, subcmd, opts, *args):
"""${cmd_name}: create raw image
Usage:
${name} ${cmd_name} <ksfile> [OPTS]
${cmd_option_list}
"""
if len(args) != 1:
raise errors.Usage("Extra arguments given")
creatoropts = configmgr.create
ksconf = args[0]
if creatoropts['runtime'] == "bootstrap":
configmgr._ksconf = ksconf
rt_util.bootstrap_mic()
recording_pkgs = []
if len(creatoropts['record_pkgs']) > 0:
recording_pkgs = creatoropts['record_pkgs']
if creatoropts['release'] is not None:
if 'name' not in recording_pkgs:
recording_pkgs.append('name')
if 'vcs' not in recording_pkgs:
recording_pkgs.append('vcs')
configmgr._ksconf = ksconf
# Called after setting configmgr._ksconf, as the creatoropts['name'] is reset there.
if creatoropts['release'] is not None:
creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])
# try to find the pkgmgr
pkgmgr = None
backends = pluginmgr.get_plugins('backend')
if 'auto' == creatoropts['pkgmgr']:
for key in configmgr.prefer_backends:
if key in backends:
pkgmgr = backends[key]
break
else:
for key in backends.keys():
if key == creatoropts['pkgmgr']:
pkgmgr = backends[key]
break
if not pkgmgr:
raise errors.CreatorError("Can't find backend: %s, "
"available choices: %s" %
(creatoropts['pkgmgr'],
','.join(backends.keys())))
creator = raw.RawImageCreator(creatoropts, pkgmgr, opts.compress_image,
opts.generate_bmap, opts.fstab_entry)
if len(recording_pkgs) > 0:
creator._recording_pkgs = recording_pkgs
images = ["%s-%s.raw" % (creator.name, disk_name)
for disk_name in creator.get_disk_names()]
self.check_image_exists(creator.destdir,
creator.pack_to,
images,
creatoropts['release'])
try:
creator.check_depend_tools()
creator.mount(None, creatoropts["cachedir"])
creator.install()
creator.configure(creatoropts["repomd"])
creator.copy_kernel()
creator.unmount()
creator.generate_bmap()
creator.package(creatoropts["outdir"])
if creatoropts['release'] is not None:
creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
msger.info("Finished.")
return 0
@classmethod
def do_chroot(cls, target, cmd=[]):
img = target
imgsize = misc.get_file_size(img) * 1024L * 1024L
partedcmd = fs_related.find_binary_path("parted")
disk = fs_related.SparseLoopbackDisk(img, imgsize)
imgmnt = misc.mkdtemp()
imgloop = PartitionedMount(imgmnt, skipformat = True)
imgloop.add_disk('/dev/sdb', disk)
img_fstype = "ext3"
msger.info("Partition Table:")
partnum = []
for line in runner.outs([partedcmd, "-s", img, "print"]).splitlines():
# no use strip to keep line output here
if "Number" in line:
msger.raw(line)
if line.strip() and line.strip()[0].isdigit():
partnum.append(line.strip()[0])
msger.raw(line)
rootpart = None
if len(partnum) > 1:
rootpart = msger.choice("please choose root partition", partnum)
# Check the partitions from raw disk.
# if choose root part, the mark it as mounted
if rootpart:
root_mounted = True
else:
root_mounted = False
partition_mounts = 0
for line in runner.outs([partedcmd,"-s",img,"unit","B","print"]).splitlines():
line = line.strip()
# Lines that start with a number are the partitions;
# because parted output may be localized, we can't match on any text labels.
if not line or not line[0].isdigit():
continue
# Some fields have a trailing ',' as a list separator; strip it.
line = line.replace(",","")
# Example of parted output lines that are handled:
# Number Start End Size Type File system Flags
# 1 512B 3400000511B 3400000000B primary
# 2 3400531968B 3656384511B 255852544B primary linux-swap(v1)
# 3 3656384512B 3720347647B 63963136B primary fat16 boot, lba
partition_info = re.split("\s+",line)
size = partition_info[3].split("B")[0]
if len(partition_info) < 6 or partition_info[5] in ["boot"]:
# No filesystem can be found from partition line. Assuming
# btrfs, because that is the only MeeGo fs that parted does
# not recognize properly.
# TODO: Can we make better assumption?
fstype = "btrfs"
elif partition_info[5] in ["ext2","ext3","ext4","btrfs"]:
fstype = partition_info[5]
elif partition_info[5] in ["fat16","fat32"]:
fstype = "vfat"
elif "swap" in partition_info[5]:
fstype = "swap"
else:
raise errors.CreatorError("Could not recognize partition fs type '%s'." % partition_info[5])
if rootpart and rootpart == line[0]:
mountpoint = '/'
elif not root_mounted and fstype in ["ext2","ext3","ext4","btrfs"]:
# TODO: Check that this is actually the valid root partition from /etc/fstab
mountpoint = "/"
root_mounted = True
elif fstype == "swap":
mountpoint = "swap"
else:
# TODO: Assign better mount points for the rest of the partitions.
partition_mounts += 1
mountpoint = "/media/partition_%d" % partition_mounts
if "boot" in partition_info:
boot = True
else:
boot = False
msger.verbose("Size: %s Bytes, fstype: %s, mountpoint: %s, boot: %s" % (size, fstype, mountpoint, boot))
# TODO: add_partition should take bytes as size parameter.
imgloop.add_partition(int(size)/1024/1024, "/dev/sdb", mountpoint, fstype = fstype, boot = boot)
try:
imgloop.mount()
except errors.MountError:
imgloop.cleanup()
raise
try:
if len(cmd) != 0:
cmdline = ' '.join(cmd)
else:
cmdline = "/bin/bash"
envcmd = fs_related.find_binary_inchroot("env", imgmnt)
if envcmd:
cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
chroot.chroot(imgmnt, None, cmdline)
except:
raise errors.CreatorError("Failed to chroot to %s." %img)
finally:
chroot.cleanup_after_chroot("img", imgloop, None, imgmnt)
@classmethod
def do_unpack(cls, srcimg):
srcimgsize = (misc.get_file_size(srcimg)) * 1024L * 1024L
srcmnt = misc.mkdtemp("srcmnt")
disk = fs_related.SparseLoopbackDisk(srcimg, srcimgsize)
srcloop = PartitionedMount(srcmnt, skipformat = True)
srcloop.add_disk('/dev/sdb', disk)
srcloop.add_partition(srcimgsize/1024/1024, "/dev/sdb", "/", "ext3", boot=False)
try:
srcloop.mount()
except errors.MountError:
srcloop.cleanup()
raise
image = os.path.join(tempfile.mkdtemp(dir = "/var/tmp", prefix = "tmp"), "target.img")
args = ['dd', "if=%s" % srcloop.partitions[0]['device'], "of=%s" % image]
msger.info("`dd` image ...")
rc = runner.show(args)
srcloop.cleanup()
shutil.rmtree(os.path.dirname(srcmnt), ignore_errors = True)
if rc != 0:
raise errors.CreatorError("Failed to dd")
else:
return image
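# Illustrative example of the command built above for a one-partition
# image (the mapped partition device path is an assumption; the real
# path is chosen by PartitionedMount at mount time):
#
#   dd if=/dev/mapper/loop0p1 of=/var/tmp/tmpXXXXXX/target.img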
########NEW FILE########
__FILENAME__ = bootimg-efi
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This implements the 'bootimg-efi' source plugin class for 'wic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import os
import shutil
import re
import tempfile
from mic import kickstart, chroot, msger
from mic.utils import misc, fs_related, errors, runner, cmdln
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.utils.partitionedfs import PartitionedMount
import mic.imager.direct as direct
from mic.pluginbase import SourcePlugin
from mic.utils.oe.misc import *
from mic.imager.direct import DirectImageCreator
class BootimgEFIPlugin(SourcePlugin):
name = 'bootimg-efi'
@classmethod
def do_configure_partition(self, part, cr, cr_workdir, oe_builddir,
bootimg_dir, kernel_dir, native_sysroot):
"""
Called before do_prepare_partition(), creates grubefi config
"""
hdddir = "%s/hdd/boot" % cr_workdir
rm_cmd = "rm -rf %s" % cr_workdir
exec_cmd(rm_cmd)
install_cmd = "install -d %s/EFI/BOOT" % hdddir
tmp = exec_cmd(install_cmd)
splash = os.path.join(cr_workdir, "/EFI/boot/splash.jpg")
if os.path.exists(splash):
splashline = "menu background splash.jpg"
else:
splashline = ""
(rootdev, root_part_uuid) = cr._get_boot_config()
options = cr.ks.handler.bootloader.appendLine
grubefi_conf = ""
grubefi_conf += "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1\n"
grubefi_conf += "default=boot\n"
timeout = kickstart.get_timeout(cr.ks)
if not timeout:
timeout = 0
grubefi_conf += "timeout=%s\n" % timeout
grubefi_conf += "menuentry 'boot'{\n"
kernel = "/vmlinuz"
if cr._ptable_format == 'msdos':
rootstr = rootdev
else:
if not root_part_uuid:
raise MountError("Cannot find the root GPT partition UUID")
rootstr = "PARTUUID=%s" % root_part_uuid
grubefi_conf += "linux %s root=%s rootwait %s\n" \
% (kernel, rootstr, options)
grubefi_conf += "}\n"
if splashline:
grubefi_conf += "%s\n" % splashline
msger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg" \
% cr_workdir)
cfg = open("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir, "w")
cfg.write(grubefi_conf)
cfg.close()
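# Illustrative sketch of the grub.cfg generated above; the timeout,
# PARTUUID and append options are assumed example values, not captured
# output:
#
#   serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
#   default=boot
#   timeout=5
#   menuentry 'boot'{
#   linux /vmlinuz root=PARTUUID=4f68bce3-e8cd-4db1-96e7-fbcaf984b709 rootwait console=ttyS0,115200
#   }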
@classmethod
def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
kernel_dir, rootfs_dir, native_sysroot):
"""
Called to do the actual content population for a partition i.e. it
'prepares' the partition to be incorporated into the image.
In this case, prepare content for an EFI (grub) boot partition.
"""
if not bootimg_dir:
bootimg_dir = get_bitbake_var("HDDDIR")
if not bootimg_dir:
msger.error("Couldn't find HDDDIR, exiting\n")
# just so the result notes display it
cr.set_bootimg_dir(bootimg_dir)
staging_kernel_dir = kernel_dir
staging_data_dir = bootimg_dir
hdddir = "%s/hdd" % cr_workdir
install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
(staging_kernel_dir, hdddir)
tmp = exec_cmd(install_cmd)
shutil.copyfile("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir,
"%s/grub.cfg" % cr_workdir)
cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (staging_data_dir, hdddir)
exec_cmd(cp_cmd, True)
shutil.move("%s/grub.cfg" % cr_workdir,
"%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir)
du_cmd = "du -bks %s" % hdddir
rc, out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
extra_blocks = part.get_extra_block_count(blocks)
if extra_blocks < BOOTDD_EXTRA_SPACE:
extra_blocks = BOOTDD_EXTRA_SPACE
blocks += extra_blocks
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, part.mountpoint, blocks))
# Ensure total sectors is an integral number of sectors per
# track or mcopy will complain. Sectors are 512 bytes, and we
# generate images with 32 sectors per track. This calculation is
# done in blocks, thus the mod by 16 instead of 32.
blocks += (16 - (blocks % 16))
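# Worked example with illustrative numbers: for blocks = 1000,
# 1000 % 16 == 8, so 16 - 8 == 8 blocks are added; 1008 1KiB blocks are
# 2016 512-byte sectors, i.e. exactly 63 tracks of 32 sectors.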
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
dosfs_cmd = "mkdosfs -n efi -C %s %d" % (bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
exec_native_cmd(mcopy_cmd, native_sysroot)
chmod_cmd = "chmod 644 %s" % bootimg
exec_cmd(chmod_cmd)
du_cmd = "du -Lbms %s" % bootimg
rc, out = exec_cmd(du_cmd)
bootimg_size = out.split()[0]
part.set_size(bootimg_size)
part.set_source_file(bootimg)
########NEW FILE########
__FILENAME__ = bootimg-pcbios
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This implements the 'bootimg-pcbios' source plugin class for 'wic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import os
import shutil
import re
import tempfile
from mic import kickstart, chroot, msger
from mic.utils import misc, fs_related, errors, runner, cmdln
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.utils.partitionedfs import PartitionedMount
import mic.imager.direct as direct
from mic.pluginbase import SourcePlugin
from mic.utils.oe.misc import *
from mic.imager.direct import DirectImageCreator
class BootimgPcbiosPlugin(SourcePlugin):
name = 'bootimg-pcbios'
@classmethod
def do_install_disk(self, disk, disk_name, cr, workdir, oe_builddir,
bootimg_dir, kernel_dir, native_sysroot):
"""
Called after all partitions have been prepared and assembled into a
disk image. In this case, we install the MBR.
"""
mbrfile = "%s/syslinux/" % bootimg_dir
if cr._ptable_format == 'gpt':
mbrfile += "gptmbr.bin"
else:
mbrfile += "mbr.bin"
if not os.path.exists(mbrfile):
msger.error("Couldn't find %s. If using the -e option, do you have the right MACHINE set in local.conf? If not, is the bootimg_dir path correct?" % mbrfile)
full_path = cr._full_path(workdir, disk_name, "direct")
msger.debug("Installing MBR on disk %s as %s with size %s bytes" \
% (disk_name, full_path, disk['min_size']))
rc = runner.show(['dd', 'if=%s' % mbrfile,
'of=%s' % full_path, 'conv=notrunc'])
if rc != 0:
raise MountError("Unable to set MBR to %s" % full_path)
@classmethod
def do_configure_partition(self, part, cr, cr_workdir, oe_builddir,
bootimg_dir, kernel_dir, native_sysroot):
"""
Called before do_prepare_partition(), creates syslinux config
"""
hdddir = "%s/hdd/boot" % cr_workdir
rm_cmd = "rm -rf " + cr_workdir
exec_cmd(rm_cmd)
install_cmd = "install -d %s" % hdddir
tmp = exec_cmd(install_cmd)
splash = os.path.join(cr_workdir, "/hdd/boot/splash.jpg")
if os.path.exists(splash):
splashline = "menu background splash.jpg"
else:
splashline = ""
(rootdev, root_part_uuid) = cr._get_boot_config()
options = cr.ks.handler.bootloader.appendLine
syslinux_conf = ""
syslinux_conf += "PROMPT 0\n"
timeout = kickstart.get_timeout(cr.ks)
if not timeout:
timeout = 0
syslinux_conf += "TIMEOUT " + str(timeout) + "\n"
syslinux_conf += "\n"
syslinux_conf += "ALLOWOPTIONS 1\n"
syslinux_conf += "SERIAL 0 115200\n"
syslinux_conf += "\n"
if splashline:
syslinux_conf += "%s\n" % splashline
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
kernel = "/vmlinuz"
syslinux_conf += "KERNEL " + kernel + "\n"
if cr._ptable_format == 'msdos':
rootstr = rootdev
else:
if not root_part_uuid:
raise MountError("Cannot find the root GPT partition UUID")
rootstr = "PARTUUID=%s" % root_part_uuid
syslinux_conf += "APPEND label=boot root=%s %s\n" % (rootstr, options)
msger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg" \
% cr_workdir)
cfg = open("%s/hdd/boot/syslinux.cfg" % cr_workdir, "w")
cfg.write(syslinux_conf)
cfg.close()
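# Illustrative sketch of the syslinux.cfg generated above; the root
# device and append options are assumed example values:
#
#   PROMPT 0
#   TIMEOUT 0
#
#   ALLOWOPTIONS 1
#   SERIAL 0 115200
#
#   DEFAULT boot
#   LABEL boot
#   KERNEL /vmlinuz
#   APPEND label=boot root=/dev/sda2 rootwait quiet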
@classmethod
def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
kernel_dir, rootfs_dir, native_sysroot):
"""
Called to do the actual content population for a partition i.e. it
'prepares' the partition to be incorporated into the image.
In this case, prepare content for legacy bios boot partition.
"""
if not bootimg_dir:
bootimg_dir = get_bitbake_var("STAGING_DATADIR")
if not bootimg_dir:
msger.error("Couldn't find STAGING_DATADIR, exiting\n")
# just so the result notes display it
cr.set_bootimg_dir(bootimg_dir)
staging_kernel_dir = kernel_dir
staging_data_dir = bootimg_dir
hdddir = "%s/hdd/boot" % cr_workdir
install_cmd = "install -m 0644 %s/bzImage %s/vmlinuz" \
% (staging_kernel_dir, hdddir)
tmp = exec_cmd(install_cmd)
install_cmd = "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" \
% (staging_data_dir, hdddir)
tmp = exec_cmd(install_cmd)
du_cmd = "du -bks %s" % hdddir
rc, out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
extra_blocks = part.get_extra_block_count(blocks)
if extra_blocks < BOOTDD_EXTRA_SPACE:
extra_blocks = BOOTDD_EXTRA_SPACE
blocks += extra_blocks
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, part.mountpoint, blocks))
# Ensure total sectors is an integral number of sectors per
# track or mcopy will complain. Sectors are 512 bytes, and we
# generate images with 32 sectors per track. This calculation is
# done in blocks, thus the mod by 16 instead of 32.
blocks += (16 - (blocks % 16))
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
dosfs_cmd = "mkdosfs -n boot -S 512 -C %s %d" % (bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
exec_native_cmd(mcopy_cmd, native_sysroot)
syslinux_cmd = "syslinux %s" % bootimg
exec_native_cmd(syslinux_cmd, native_sysroot)
chmod_cmd = "chmod 644 %s" % bootimg
exec_cmd(chmod_cmd)
du_cmd = "du -Lbms %s" % bootimg
rc, out = exec_cmd(du_cmd)
bootimg_size = out.split()[0]
part.set_size(bootimg_size)
part.set_source_file(bootimg)
########NEW FILE########
__FILENAME__ = rootfs
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This implements the 'rootfs' source plugin class for 'wic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
# Joao Henrique Ferreira de Freitas <joaohf (at] gmail.com>
#
import os
import shutil
import re
import tempfile
from mic import kickstart, chroot, msger
from mic.utils import misc, fs_related, errors, runner, cmdln
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.utils.partitionedfs import PartitionedMount
import mic.imager.direct as direct
from mic.pluginbase import SourcePlugin
from mic.utils.oe.misc import *
from mic.imager.direct import DirectImageCreator
class RootfsPlugin(SourcePlugin):
name = 'rootfs'
@staticmethod
def __get_rootfs_dir(rootfs_dir):
if os.path.isdir(rootfs_dir):
return rootfs_dir
bitbake_env_lines = find_bitbake_env_lines(rootfs_dir)
if not bitbake_env_lines:
msg = "Couldn't get bitbake environment, exiting."
msger.error(msg)
image_rootfs_dir = find_artifact(bitbake_env_lines, "IMAGE_ROOTFS")
if not os.path.isdir(image_rootfs_dir):
msg = "No valid artifact IMAGE_ROOTFS from image named"
msg += " %s has been found at %s, exiting.\n" % \
(rootfs_dir, image_rootfs_dir)
msger.error(msg)
return image_rootfs_dir
@classmethod
def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
kernel_dir, krootfs_dir, native_sysroot):
"""
Called to do the actual content population for a partition i.e. it
'prepares' the partition to be incorporated into the image.
In this case, populate the partition from the given rootfs directory.
"""
if part.rootfs is None:
if 'ROOTFS_DIR' not in krootfs_dir:
msg = "Couldn't find --rootfs-dir, exiting"
msger.error(msg)
rootfs_dir = krootfs_dir['ROOTFS_DIR']
else:
if part.rootfs in krootfs_dir:
rootfs_dir = krootfs_dir[part.rootfs]
elif part.rootfs:
rootfs_dir = part.rootfs
else:
msg = "Couldn't find --rootfs-dir=%s connection"
msg += " or it is not a valid path, exiting"
msger.error(msg % part.rootfs)
real_rootfs_dir = self.__get_rootfs_dir(rootfs_dir)
part.set_rootfs(real_rootfs_dir)
part.prepare_rootfs(cr_workdir, oe_builddir, real_rootfs_dir, native_sysroot)
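# A kickstart partition line this source plugin is meant to serve might
# look like the following (illustrative .wks syntax; 'rootfs' is the
# connection name resolved through krootfs_dir above):
#
#   part / --source rootfs --ondisk sda --fstype=ext3 --label platform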
########NEW FILE########
__FILENAME__ = rt_util
#!/usr/bin/python -tt
#
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import os
import sys
import glob
import re
import shutil
import subprocess
from mic import bootstrap, msger
from mic.conf import configmgr
from mic.utils import errors, proxy
from mic.utils.fs_related import find_binary_path, makedirs
from mic.chroot import setup_chrootenv, cleanup_chrootenv
expath = lambda p: os.path.abspath(os.path.expanduser(p))
def bootstrap_mic(argv=None):
def mychroot():
os.chroot(rootdir)
os.chdir(cwd)
# by default, sys.argv is used to run mic in bootstrap
if not argv:
argv = sys.argv
if argv[0] not in ('/usr/bin/mic', 'mic'):
argv[0] = '/usr/bin/mic'
cropts = configmgr.create
bsopts = configmgr.bootstrap
distro = bsopts['distro_name'].lower()
rootdir = bsopts['rootdir']
pkglist = bsopts['packages']
cwd = os.getcwd()
# create bootstrap and run mic in bootstrap
bsenv = bootstrap.Bootstrap(rootdir, distro, cropts['arch'])
bsenv.logfile = cropts['logfile']
# rootdir is regenerated as a temp dir
rootdir = bsenv.rootdir
if 'optional' in bsopts:
optlist = bsopts['optional']
else:
optlist = []
try:
msger.info("Creating %s bootstrap ..." % distro)
bsenv.create(cropts['repomd'], pkglist, optlist)
# bootstrap is relocated under "bootstrap"
if os.path.exists(os.path.join(rootdir, "bootstrap")):
rootdir = os.path.join(rootdir, "bootstrap")
bsenv.dirsetup(rootdir)
sync_mic(rootdir)
#FIXME: sync the ks file to bootstrap
if "/" == os.path.dirname(os.path.abspath(configmgr._ksconf)):
safecopy(configmgr._ksconf, rootdir)
msger.info("Start mic in bootstrap: %s\n" % rootdir)
bindmounts = get_bindmounts(cropts)
ret = bsenv.run(argv, cwd, rootdir, bindmounts)
except errors.BootstrapError, err:
msger.warning('\n%s' % err)
if msger.ask("Switch to native mode and continue?"):
return
raise
except RuntimeError, err:
# change the exception type but keep the traceback
value, tb = sys.exc_info()[1:]
raise errors.BootstrapError, value, tb
else:
sys.exit(ret)
finally:
bsenv.cleanup()
def get_bindmounts(cropts):
binddirs = [
os.getcwd(),
cropts['tmpdir'],
cropts['cachedir'],
cropts['outdir'],
cropts['local_pkgs_path'],
]
bindfiles = [
cropts['logfile'],
configmgr._ksconf,
]
for lrepo in cropts['localrepos']:
binddirs.append(lrepo)
bindlist = map(expath, filter(None, binddirs))
bindlist += map(os.path.dirname, map(expath, filter(None, bindfiles)))
bindlist = sorted(set(bindlist))
bindmounts = ';'.join(bindlist)
return bindmounts
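# For example (illustrative paths), with cwd '/home/user/build', cache
# dir '/var/tmp/mic/cache' and log file '/home/user/logs/mic.log', the
# returned string would look like:
#
#   /home/user/build;/home/user/logs;/var/tmp/mic/cache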
def get_mic_binpath():
fp = None
try:
import pkg_resources # depends on 'setuptools'
except ImportError:
pass
else:
dist = pkg_resources.get_distribution('mic')
# the real script is under EGG_INFO/scripts
if dist.has_metadata('scripts/mic'):
fp = os.path.join(dist.egg_info, "scripts/mic")
if fp:
return fp
# script not found; fall back to a PATH lookup (e.g. a 'flat' egg install)
try:
return find_binary_path('mic')
except errors.CreatorError:
raise errors.BootstrapError("Can't find mic binary in host OS")
def get_mic_modpath():
try:
import mic
except ImportError:
raise errors.BootstrapError("Can't find mic module in host OS")
path = os.path.abspath(mic.__file__)
return os.path.dirname(path)
def get_mic_libpath():
# TBD: so far mic lib path is hard coded
return "/usr/lib/mic"
# the hard-coded paths below are prepared for the bootstrap
def sync_mic(bootstrap, binpth = '/usr/bin/mic',
libpth='/usr/lib',
pylib = '/usr/lib/python2.7/site-packages',
conf = '/etc/mic/mic.conf'):
_path = lambda p: os.path.join(bootstrap, p.lstrip('/'))
micpaths = {
'binpth': get_mic_binpath(),
'libpth': get_mic_libpath(),
'pylib': get_mic_modpath(),
'conf': '/etc/mic/mic.conf',
}
if not os.path.exists(_path(pylib)):
pyptn = '/usr/lib/python?.?/site-packages'
pylibs = glob.glob(_path(pyptn))
if pylibs:
pylib = pylibs[0].replace(bootstrap, '')
else:
raise errors.BootstrapError("Can't find python site dir in: %s" %
bootstrap)
for key, value in micpaths.items():
try:
safecopy(value, _path(eval(key)), False, ["*.pyc", "*.pyo"])
except (OSError, IOError), err:
raise errors.BootstrapError(err)
# auto select backend
conf_str = file(_path(conf)).read()
conf_str = re.sub("pkgmgr\s*=\s*.*", "pkgmgr=auto", conf_str)
with open(_path(conf), 'w') as wf:
wf.write(conf_str)
# chmod +x /usr/bin/mic
os.chmod(_path(binpth), 0777)
# correct python interpreter
mic_cont = file(_path(binpth)).read()
mic_cont = "#!/usr/bin/python\n" + mic_cont
with open(_path(binpth), 'w') as wf:
wf.write(mic_cont)
def safecopy(src, dst, symlinks=False, ignore_ptns=()):
if os.path.isdir(src):
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if os.path.exists(dst):
shutil.rmtree(dst, ignore_errors=True)
src = src.rstrip('/')
# check common prefix to ignore copying itself
if dst.startswith(src + '/'):
ignore_ptns = list(ignore_ptns) + [ os.path.basename(src) ]
ignores = shutil.ignore_patterns(*ignore_ptns)
try:
shutil.copytree(src, dst, symlinks, ignores)
except (OSError, IOError):
shutil.rmtree(dst, ignore_errors=True)
raise
else:
if not os.path.isdir(dst):
makedirs(os.path.dirname(dst))
shutil.copy2(src, dst)
########NEW FILE########
__FILENAME__ = BmapCreate
""" This module implements the block map (bmap) creation functionality and
provides the corresponding API in the form of the 'BmapCreate' class.
The idea is that while image files may generally be very large (e.g., 4GiB),
they may nevertheless contain only little real data, e.g., 512MiB. These data
are files, directories, file-system meta-data, the partition table, etc. When
copying the image to the target device, you do not have to copy all the 4GiB of
data, you can copy only 512MiB of it, which is 8 times less, so copying should
presumably be about 8 times faster.
The block map file is an XML file which contains a list of blocks which have to
be copied to the target device. The other blocks are not used and there is no
need to copy them. The XML file also contains some additional information like
block size, image size, count of mapped blocks, etc. There are also many
commentaries, so it is human-readable.
The image has to be a sparse file. Generally, this means that when you generate
this image file, you should start with a huge sparse file which contains a
single hole spanning the entire file. Then you should partition it, write all
the data (probably by means of loop-back mounting the image or parts of it),
etc. The end result should be a sparse file where mapped areas represent useful
parts of the image and holes represent useless parts of the image, which do not
have to be copied when copying the image to the target device.
This module uses the FIEMAP ioctl to detect holes. """
# Disable the following pylint recommendations:
# * Too many instance attributes - R0902
# * Too few public methods - R0903
# pylint: disable=R0902,R0903
import hashlib
from mic.utils.misc import human_size
from mic.utils import Fiemap
# The bmap format version we generate
SUPPORTED_BMAP_VERSION = "1.3"
_BMAP_START_TEMPLATE = \
"""<?xml version="1.0" ?>
<!-- This file contains the block map for an image file, which is basically
a list of useful (mapped) block numbers in the image file. In other words,
it lists only those blocks which contain data (boot sector, partition
table, file-system metadata, files, directories, extents, etc). These
blocks have to be copied to the target device. The other blocks do not
contain any useful data and do not have to be copied to the target
device.
The block map is an optimization which allows copying or flashing the image to
the target device quicker than copying or flashing the entire image. This is
because with bmap less data is copied: <MappedBlocksCount> blocks instead
of <BlocksCount> blocks.
Besides the machine-readable data, this file contains useful commentaries
which contain human-readable information like image size, percentage of
mapped data, etc.
The 'version' attribute is the block map file format version in the
'major.minor' format. The version major number is increased whenever an
incompatible block map format change is made. The minor number changes
in case of minor backward-compatible changes. -->
<bmap version="%s">
<!-- Image size in bytes: %s -->
<ImageSize> %u </ImageSize>
<!-- Size of a block in bytes -->
<BlockSize> %u </BlockSize>
<!-- Count of blocks in the image file -->
<BlocksCount> %u </BlocksCount>
"""
class Error(Exception):
""" A class for exceptions generated by this module. We currently support
only one type of exceptions, and we basically throw human-readable problem
description in case of errors. """
pass
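# Rendered with, say, (version, human size, size, block size, blocks) =
# ("1.3", "512 MiB", 536870912, 4096, 131072), all illustrative values
# (the human-readable string is whatever human_size() returns), the
# template above yields a header whose numeric fields satisfy
# ImageSize == BlockSize * BlocksCount.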
class BmapCreate:
""" This class implements the bmap creation functionality. To generate a
bmap for an image (which is supposedly a sparse file), you should first
create an instance of 'BmapCreate' and provide:
* full path or a file-like object of the image to create bmap for
* full path or a file object to use for writing the results to
Then you should invoke the 'generate()' method of this class. It will use
the FIEMAP ioctl to generate the bmap. """
def _open_image_file(self):
""" Open the image file. """
try:
self._f_image = open(self._image_path, 'rb')
except IOError as err:
raise Error("cannot open image file '%s': %s" \
% (self._image_path, err))
self._f_image_needs_close = True
def _open_bmap_file(self):
""" Open the bmap file. """
try:
self._f_bmap = open(self._bmap_path, 'w+')
except IOError as err:
raise Error("cannot open bmap file '%s': %s" \
% (self._bmap_path, err))
self._f_bmap_needs_close = True
def __init__(self, image, bmap):
""" Initialize a class instance:
* image - full path or a file-like object of the image to create bmap
for
* bmap - full path or a file object to use for writing the resulting
bmap to """
self.image_size = None
self.image_size_human = None
self.block_size = None
self.blocks_cnt = None
self.mapped_cnt = None
self.mapped_size = None
self.mapped_size_human = None
self.mapped_percent = None
self._mapped_count_pos1 = None
self._mapped_count_pos2 = None
self._sha1_pos = None
self._f_image_needs_close = False
self._f_bmap_needs_close = False
if hasattr(image, "read"):
self._f_image = image
self._image_path = image.name
else:
self._image_path = image
self._open_image_file()
if hasattr(bmap, "read"):
self._f_bmap = bmap
self._bmap_path = bmap.name
else:
self._bmap_path = bmap
self._open_bmap_file()
self.fiemap = Fiemap.Fiemap(self._f_image)
self.image_size = self.fiemap.image_size
self.image_size_human = human_size(self.image_size)
if self.image_size == 0:
raise Error("cannot generate bmap for zero-sized image file '%s'" \
% self._image_path)
self.block_size = self.fiemap.block_size
self.blocks_cnt = self.fiemap.blocks_cnt
def _bmap_file_start(self):
""" A helper function which generates the starting contents of the
block map file: the header comment, image size, block size, etc. """
# We do not know the amount of mapped blocks at the moment, so just put
# whitespaces instead of real numbers. Assume the longest possible
# numbers.
mapped_count = ' ' * len(str(self.image_size))
mapped_size_human = ' ' * len(self.image_size_human)
xml = _BMAP_START_TEMPLATE \
% (SUPPORTED_BMAP_VERSION, self.image_size_human,
self.image_size, self.block_size, self.blocks_cnt)
xml += " <!-- Count of mapped blocks: "
self._f_bmap.write(xml)
self._mapped_count_pos1 = self._f_bmap.tell()
# Just put white-spaces instead of real information about mapped blocks
xml = "%s or %.1f -->\n" % (mapped_size_human, 100.0)
xml += " <MappedBlocksCount> "
self._f_bmap.write(xml)
self._mapped_count_pos2 = self._f_bmap.tell()
xml = "%s </MappedBlocksCount>\n\n" % mapped_count
# pylint: disable=C0301
xml += " <!-- The checksum of this bmap file. When it is calculated, the value of\n"
xml += "      the SHA1 checksum has to be zero (40 ASCII \"0\" symbols). -->\n"
xml += " <BmapFileSHA1> "
self._f_bmap.write(xml)
self._sha1_pos = self._f_bmap.tell()
xml = "0" * 40 + " </BmapFileSHA1>\n\n"
xml += " <!-- The block map which consists of elements which may either be a\n"
xml += " range of blocks or a single block. The 'sha1' attribute (if present)\n"
xml += " is the SHA1 checksum of this blocks range. -->\n"
xml += " <BlockMap>\n"
# pylint: enable=C0301
self._f_bmap.write(xml)
def _bmap_file_end(self):
""" A helper function which generates the final parts of the block map
file: the ending tags and the information about the amount of mapped
blocks. """
xml = " </BlockMap>\n"
xml += "</bmap>\n"
self._f_bmap.write(xml)
self._f_bmap.seek(self._mapped_count_pos1)
self._f_bmap.write("%s or %.1f%%" % \
(self.mapped_size_human, self.mapped_percent))
self._f_bmap.seek(self._mapped_count_pos2)
self._f_bmap.write("%u" % self.mapped_cnt)
self._f_bmap.seek(0)
sha1 = hashlib.sha1(self._f_bmap.read()).hexdigest()
self._f_bmap.seek(self._sha1_pos)
self._f_bmap.write("%s" % sha1)
def _calculate_sha1(self, first, last):
""" A helper function which calculates SHA1 checksum for the range of
blocks of the image file: from block 'first' to block 'last'. """
start = first * self.block_size
end = (last + 1) * self.block_size
self._f_image.seek(start)
hash_obj = hashlib.new("sha1")
chunk_size = 1024*1024
to_read = end - start
read = 0
while read < to_read:
if read + chunk_size > to_read:
chunk_size = to_read - read
chunk = self._f_image.read(chunk_size)
hash_obj.update(chunk)
read += chunk_size
return hash_obj.hexdigest()
def generate(self, include_checksums = True):
""" Generate bmap for the image file. If 'include_checksums' is 'True',
also generate SHA1 checksums for block ranges. """
# Save image file position in order to restore it at the end
image_pos = self._f_image.tell()
self._bmap_file_start()
# Generate the block map and write it to the XML block map
# file as we go.
self.mapped_cnt = 0
for first, last in self.fiemap.get_mapped_ranges(0, self.blocks_cnt):
self.mapped_cnt += last - first + 1
if include_checksums:
sha1 = self._calculate_sha1(first, last)
sha1 = " sha1=\"%s\"" % sha1
else:
sha1 = ""
if first != last:
self._f_bmap.write(" <Range%s> %s-%s </Range>\n" \
% (sha1, first, last))
else:
self._f_bmap.write(" <Range%s> %s </Range>\n" \
% (sha1, first))
self.mapped_size = self.mapped_cnt * self.block_size
self.mapped_size_human = human_size(self.mapped_size)
self.mapped_percent = (self.mapped_cnt * 100.0) / self.blocks_cnt
self._bmap_file_end()
try:
self._f_bmap.flush()
except IOError as err:
raise Error("cannot flush the bmap file '%s': %s" \
% (self._bmap_path, err))
self._f_image.seek(image_pos)
def __del__(self):
""" The class destructor which closes the opened files. """
if self._f_image_needs_close:
self._f_image.close()
if self._f_bmap_needs_close:
self._f_bmap.close()
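# Minimal usage sketch (the file names are made up; BmapCreate and
# generate() are defined above):
#
#   creator = BmapCreate("/var/tmp/target.img", "/var/tmp/target.bmap")
#   creator.generate(include_checksums=True)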
########NEW FILE########
__FILENAME__ = cmdln
#!/usr/bin/env python
# Copyright (c) 2002-2007 ActiveState Software Inc.
# License: MIT (see LICENSE.txt for license details)
# Author: Trent Mick
# Home: http://trentm.com/projects/cmdln/
"""An improvement on Python's standard cmd.py module.
As with cmd.py, this module provides "a simple framework for writing
line-oriented command interpreters." This module provides a 'RawCmdln'
class that fixes some design flaws in cmd.Cmd, making it more scalable
and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
or simple shells. And it provides a 'Cmdln' class that adds
optparse-based option processing. Basically you use it like this:
import cmdln
class MySVN(cmdln.Cmdln):
name = "svn"
@cmdln.alias('stat', 'st')
@cmdln.option('-v', '--verbose', action='store_true',
help='print verbose information')
def do_status(self, subcmd, opts, *paths):
print "handle 'svn status' command"
#...
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
See the README.txt or <http://trentm.com/projects/cmdln/> for more
details.
"""
__version_info__ = (1, 1, 2)
__version__ = '.'.join(map(str, __version_info__))
import os
import sys
import re
import cmd
import optparse
from pprint import pprint
import sys
#---- globals
LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
# An unspecified optional argument when None is a meaningful value.
_NOT_SPECIFIED = ("Not", "Specified")
# Pattern to match a TypeError message from a call that
# failed because of incorrect number of arguments (see
# Python/getargs.c).
_INCORRECT_NUM_ARGS_RE = re.compile(
r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
#---- exceptions
class CmdlnError(Exception):
"""A cmdln.py usage error."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class CmdlnUserError(Exception):
"""An error by a user of a cmdln-based tool/shell."""
pass
#---- public methods and classes
def alias(*aliases):
"""Decorator to add aliases for Cmdln.do_* command handlers.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.alias("!", "sh")
def do_shell(self, argv):
#...implement 'shell' command
"""
def decorate(f):
if not hasattr(f, "aliases"):
f.aliases = []
f.aliases += aliases
return f
return decorate
class RawCmdln(cmd.Cmd):
"""An improved (on cmd.Cmd) framework for building multi-subcommand
scripts (think "svn" & "cvs") and simple shells (think "pdb" and
"gdb").
A simple example:
import cmdln
class MySVN(cmdln.RawCmdln):
name = "svn"
@cmdln.alias('stat', 'st')
def do_status(self, argv):
print "handle 'svn status' command"
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
See <http://trentm.com/projects/cmdln> for more information.
"""
name = None # if unset, defaults to basename(sys.argv[0])
prompt = None # if unset, defaults to self.name+"> "
version = None # if set, default top-level options include --version
# Default messages for some 'help' command error cases.
# They are interpolated with one arg: the command.
nohelp = "no help on '%s'"
unknowncmd = "unknown command: '%s'"
helpindent = '' # string with which to indent help output
def __init__(self, completekey='tab',
stdin=None, stdout=None, stderr=None):
"""Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically.
The optional arguments 'stdin', 'stdout' and 'stderr' specify
alternate input, output and error output file objects; if not
specified, sys.* are used.
If 'stdout' but not 'stderr' is specified, stdout is used for
error output. This is to provide least surprise for users used
to only the 'stdin' and 'stdout' options with cmd.Cmd.
"""
import sys
if self.name is None:
self.name = os.path.basename(sys.argv[0])
if self.prompt is None:
self.prompt = self.name+"> "
self._name_str = self._str(self.name)
self._prompt_str = self._str(self.prompt)
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
if stderr is not None:
self.stderr = stderr
elif stdout is not None:
self.stderr = stdout
else:
self.stderr = sys.stderr
self.cmdqueue = []
self.completekey = completekey
self.cmdlooping = False
def get_optparser(self):
"""Hook for subclasses to set the option parser for the
top-level command/shell.
This option parser is retrieved and used by `.main()' to
handle top-level options.
The default implements a single '-h|--help' option. Sub-classes
can return None to have no options at the top-level. Typically
an instance of CmdlnOptionParser should be returned.
"""
version = (self.version is not None
and "%s %s" % (self._name_str, self.version)
or None)
return CmdlnOptionParser(self, version=version)
def postoptparse(self):
"""Hook method executed just after `.main()' parses top-level
options.
When called, `self.options' holds the results of the option parse.
"""
pass
def main(self, argv=None, loop=LOOP_NEVER):
"""A possible mainline handler for a script, like so:
import cmdln
class MyCmd(cmdln.Cmdln):
name = "mycmd"
...
if __name__ == "__main__":
MyCmd().main()
By default this will use sys.argv to issue a single command to
'MyCmd', then exit. The 'loop' argument can be used to control
interactive shell behaviour.
Arguments:
"argv" (optional, default sys.argv) is the command to run.
It must be a sequence, where the first element is the
command name and subsequent elements the args for that
command.
"loop" (optional, default LOOP_NEVER) is a constant
indicating if a command loop should be started (i.e. an
interactive shell). Valid values (constants on this module):
LOOP_ALWAYS start loop and run "argv", if any
LOOP_NEVER run "argv" (or .emptyline()) and exit
LOOP_IF_EMPTY run "argv", if given, and exit;
otherwise, start loop
"""
if argv is None:
import sys
argv = sys.argv
else:
argv = argv[:] # don't modify caller's list
self.optparser = self.get_optparser()
if self.optparser: # i.e. optparser=None means don't process for opts
try:
self.options, args = self.optparser.parse_args(argv[1:])
except CmdlnUserError, ex:
msg = "%s: %s\nTry '%s help' for info.\n"\
% (self.name, ex, self.name)
self.stderr.write(self._str(msg))
self.stderr.flush()
return 1
except StopOptionProcessing, ex:
return 0
else:
self.options, args = None, argv[1:]
self.postoptparse()
if loop == LOOP_ALWAYS:
if args:
self.cmdqueue.append(args)
return self.cmdloop()
elif loop == LOOP_NEVER:
if args:
return self.cmd(args)
else:
return self.emptyline()
elif loop == LOOP_IF_EMPTY:
if args:
return self.cmd(args)
else:
return self.cmdloop()
def cmd(self, argv):
"""Run one command and exit.
"argv" is the arglist for the command to run. argv[0] is the
command to run. If argv is an empty list then the
'emptyline' handler is run.
Returns the return value from the command handler.
"""
assert isinstance(argv, (list, tuple)), \
"'argv' is not a sequence: %r" % argv
retval = None
try:
argv = self.precmd(argv)
retval = self.onecmd(argv)
self.postcmd(argv)
except:
if not self.cmdexc(argv):
raise
retval = 1
return retval
def _str(self, s):
"""Safely convert the given str/unicode to a string for printing."""
try:
return str(s)
except UnicodeError:
#XXX What is the proper encoding to use here? 'utf-8' seems
# to work better than "getdefaultencoding" (usually
# 'ascii'), on OS X at least.
#import sys
#return s.encode(sys.getdefaultencoding(), "replace")
return s.encode("utf-8", "replace")
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse into an argv, and
dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
the argv. In other words, start a shell.
"intro" (optional) is an introductory message to print when
starting the command loop. This overrides the class
"intro" attribute, if any.
"""
self.cmdlooping = True
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey+": complete")
except ImportError:
pass
try:
if intro is None:
intro = self.intro
if intro:
intro_str = self._str(intro)
self.stdout.write(intro_str+'\n')
self.stop = False
retval = None
while not self.stop:
if self.cmdqueue:
argv = self.cmdqueue.pop(0)
assert isinstance(argv, (list, tuple)), \
"item on 'cmdqueue' is not a sequence: %r" % argv
else:
if self.use_rawinput:
try:
line = raw_input(self._prompt_str)
except EOFError:
line = 'EOF'
else:
self.stdout.write(self._prompt_str)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line[:-1] # chop '\n'
argv = line2argv(line)
try:
argv = self.precmd(argv)
retval = self.onecmd(argv)
self.postcmd(argv)
except:
if not self.cmdexc(argv):
raise
retval = 1
self.lastretval = retval
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
self.cmdlooping = False
return retval
def precmd(self, argv):
"""Hook method executed just before the command argv is
interpreted, but after the input prompt is generated and issued.
"argv" is the cmd to run.
Returns an argv to run (i.e. this method can modify the command
to run).
"""
return argv
def postcmd(self, argv):
"""Hook method executed just after a command dispatch is finished.
"argv" is the command that was run.
"""
pass
def cmdexc(self, argv):
"""Called if an exception is raised in any of precmd(), onecmd(),
or postcmd(). If True is returned, the exception is deemed to have
been dealt with. Otherwise, the exception is re-raised.
The default implementation handles CmdlnUserError's, which
typically correspond to user error in calling commands (as
opposed to programmer error in the design of the script using
cmdln.py).
"""
import sys
type, exc, traceback = sys.exc_info()
if isinstance(exc, CmdlnUserError):
msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
% (self.name, argv[0], exc, self.name, argv[0])
self.stderr.write(self._str(msg))
self.stderr.flush()
return True
def onecmd(self, argv):
if not argv:
return self.emptyline()
self.lastcmd = argv
cmdname = self._get_canonical_cmd_name(argv[0])
if cmdname:
handler = self._get_cmd_handler(cmdname)
if handler:
return self._dispatch_cmd(handler, argv)
return self.default(argv)
def _dispatch_cmd(self, handler, argv):
return handler(argv)
def default(self, argv):
"""Hook called to handle a command for which there is no handler.
"argv" is the command and arguments to run.
The default implementation writes an error message to stderr
and returns an error exit status.
Returns a numeric command exit status.
"""
errmsg = self._str(self.unknowncmd % (argv[0],))
if self.cmdlooping:
self.stderr.write(errmsg+"\n")
else:
self.stderr.write("%s: %s\nTry '%s help' for info.\n"
% (self._name_str, errmsg, self._name_str))
self.stderr.flush()
return 1
def parseline(self, line):
# This is used by Cmd.complete (readline completer function) to
# massage the current line buffer before completion processing.
# We override to drop special '!' handling.
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
def helpdefault(self, cmd, known):
"""Hook called to handle help on a command for which there is no
help handler.
"cmd" is the command name on which help was requested.
"known" is a boolean indicating if this command is known
(i.e. if there is a handler for it).
Returns a return code.
"""
if known:
msg = self._str(self.nohelp % (cmd,))
if self.cmdlooping:
self.stderr.write(msg + '\n')
else:
self.stderr.write("%s: %s\n" % (self.name, msg))
else:
msg = self.unknowncmd % (cmd,)
if self.cmdlooping:
self.stderr.write(msg + '\n')
else:
self.stderr.write("%s: %s\n"
"Try '%s help' for info.\n"
% (self.name, msg, self.name))
self.stderr.flush()
return 1
def do_help(self, argv):
"""${cmd_name}: give detailed help on a specific sub-command
Usage:
${name} help [COMMAND]
"""
if len(argv) > 1: # asking for help on a particular command
doc = None
cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
if not cmdname:
return self.helpdefault(argv[1], False)
else:
helpfunc = getattr(self, "help_"+cmdname, None)
if helpfunc:
doc = helpfunc()
else:
handler = self._get_cmd_handler(cmdname)
if handler:
doc = handler.__doc__
if doc is None:
return self.helpdefault(argv[1], handler != None)
else: # bare "help" command
doc = self.__class__.__doc__ # try class docstring
if doc is None:
# Try to provide some reasonably useful default help.
if self.cmdlooping: prefix = ""
else: prefix = self.name+' '
doc = """Usage:
%sCOMMAND [ARGS...]
%shelp [COMMAND]
${option_list}
${command_list}
${help_list}
""" % (prefix, prefix)
cmdname = None
if doc: # *do* have help content, massage and print that
doc = self._help_reindent(doc)
doc = self._help_preprocess(doc, cmdname)
doc = doc.rstrip() + '\n' # trim down trailing space
self.stdout.write(self._str(doc))
self.stdout.flush()
do_help.aliases = ["?"]
def _help_reindent(self, help, indent=None):
"""Hook to re-indent help strings before writing to stdout.
"help" is the help content to re-indent
"indent" is a string with which to indent each line of the
help content after normalizing. If unspecified or None
then the default is use: the 'self.helpindent' class
attribute. By default this is the empty string, i.e.
no indentation.
By default, all common leading whitespace is removed and then
the lot is indented by 'self.helpindent'. When calculating the
common leading whitespace the first line is ignored -- hence
help content for Conan can be written as follows and have the
expected indentation:
def do_crush(self, ...):
'''${cmd_name}: crush your enemies, see them driven before you...
c.f. Conan the Barbarian'''
"""
if indent is None:
indent = self.helpindent
lines = help.splitlines(0)
_dedentlines(lines, skip_first_line=True)
lines = [(indent+line).rstrip() for line in lines]
return '\n'.join(lines)
def _help_preprocess(self, help, cmdname):
"""Hook to preprocess a help string before writing to stdout.
"help" is the help string to process.
"cmdname" is the canonical sub-command name for which help
is being given, or None if the help is not specific to a
command.
By default the following template variables are interpolated in
help content. (Note: these are similar to Python 2.4's
string.Template interpolation but not quite.)
${name}
The tool's/shell's name, i.e. 'self.name'.
${option_list}
A formatted table of options for this shell/tool.
${command_list}
A formatted table of available sub-commands.
${help_list}
A formatted table of additional help topics (i.e. 'help_*'
methods with no matching 'do_*' method).
${cmd_name}
The name (and aliases) for this sub-command formatted as:
"NAME (ALIAS1, ALIAS2, ...)".
${cmd_usage}
A formatted usage block inferred from the command function
signature.
${cmd_option_list}
A formatted table of options for this sub-command. (This is
only available for commands using the optparse integration,
i.e. using @cmdln.option decorators or manually setting the
'optparser' attribute on the 'do_*' method.)
Returns the processed help.
"""
preprocessors = {
"${name}": self._help_preprocess_name,
"${option_list}": self._help_preprocess_option_list,
"${command_list}": self._help_preprocess_command_list,
"${help_list}": self._help_preprocess_help_list,
"${cmd_name}": self._help_preprocess_cmd_name,
"${cmd_usage}": self._help_preprocess_cmd_usage,
"${cmd_option_list}": self._help_preprocess_cmd_option_list,
}
for marker, preprocessor in preprocessors.items():
if marker in help:
help = preprocessor(help, cmdname)
return help
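    # Illustrative sketch (not from the original module) of how these template
    # variables are typically used in a subclass docstring:
    #
    #   class MyShell(RawCmdln):
    #       """Usage:
    #           ${name} COMMAND [ARGS...]
    #           ${name} help [COMMAND]
    #       ${option_list}
    #       ${command_list}
    #       ${help_list}
    #       """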
def _help_preprocess_name(self, help, cmdname=None):
return help.replace("${name}", self.name)
def _help_preprocess_option_list(self, help, cmdname=None):
marker = "${option_list}"
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
if self.optparser:
# Setup formatting options and format.
# - Indentation of 4 is better than optparse default of 2.
# C.f. Damian Conway's discussion of this in Perl Best
# Practices.
self.optparser.formatter.indent_increment = 4
self.optparser.formatter.current_indent = indent_width
block = self.optparser.format_option_help() + '\n'
else:
block = ""
help = help.replace(indent+marker+suffix, block, 1)
return help
def _help_preprocess_command_list(self, help, cmdname=None):
marker = "${command_list}"
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
# Find any aliases for commands.
token2canonical = self._get_canonical_map()
aliases = {}
for token, cmdname in token2canonical.items():
if token == cmdname: continue
aliases.setdefault(cmdname, []).append(token)
# Get the list of (non-hidden) commands and their
# documentation, if any.
cmdnames = {} # use a dict to strip duplicates
for attr in self.get_names():
if attr.startswith("do_"):
cmdnames[attr[3:]] = True
cmdnames = cmdnames.keys()
cmdnames.sort()
linedata = []
for cmdname in cmdnames:
if aliases.get(cmdname):
a = aliases[cmdname]
a.sort()
cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
else:
cmdstr = cmdname
doc = None
try:
helpfunc = getattr(self, 'help_'+cmdname)
except AttributeError:
handler = self._get_cmd_handler(cmdname)
if handler:
doc = handler.__doc__
else:
doc = helpfunc()
# Strip "${cmd_name}: " from the start of a command's doc. Best
# practice dictates that command help strings begin with this, but
# it isn't at all wanted for the command list.
to_strip = "${cmd_name}:"
if doc and doc.startswith(to_strip):
#log.debug("stripping %r from start of %s's help string",
# to_strip, cmdname)
doc = doc[len(to_strip):].lstrip()
linedata.append( (cmdstr, doc) )
if linedata:
subindent = indent + ' '*4
lines = _format_linedata(linedata, subindent, indent_width+4)
block = indent + "Commands:\n" \
+ '\n'.join(lines) + "\n\n"
help = help.replace(indent+marker+suffix, block, 1)
return help
def _gen_names_and_attrs(self):
# Inheritance says we have to look in class and
# base classes; order is not important.
names = []
classes = [self.__class__]
while classes:
aclass = classes.pop(0)
if aclass.__bases__:
classes = classes + list(aclass.__bases__)
for name in dir(aclass):
yield (name, getattr(aclass, name))
def _help_preprocess_help_list(self, help, cmdname=None):
marker = "${help_list}"
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
# Determine the additional help topics, if any.
helpnames = {}
token2cmdname = self._get_canonical_map()
for attrname, attr in self._gen_names_and_attrs():
if not attrname.startswith("help_"): continue
helpname = attrname[5:]
if helpname not in token2cmdname:
helpnames[helpname] = attr
if helpnames:
linedata = [(n, a.__doc__ or "") for n, a in helpnames.items()]
linedata.sort()
subindent = indent + ' '*4
lines = _format_linedata(linedata, subindent, indent_width+4)
block = (indent
+ "Additional help topics (run `%s help TOPIC'):\n" % self.name
+ '\n'.join(lines)
+ "\n\n")
else:
block = ''
help = help.replace(indent+marker+suffix, block, 1)
return help
def _help_preprocess_cmd_name(self, help, cmdname=None):
marker = "${cmd_name}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r"
% (marker, cmdname))
s = cmdname
if hasattr(handler, "aliases"):
s += " (%s)" % (", ".join(handler.aliases))
help = help.replace(marker, s)
return help
#TODO: this only makes sense as part of the Cmdln class.
# Add hooks to add help preprocessing template vars and put
# this one on that class.
def _help_preprocess_cmd_usage(self, help, cmdname=None):
marker = "${cmd_usage}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r"
% (marker, cmdname))
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
# Extract the introspection bits we need.
func = handler.im_func
if func.func_defaults:
func_defaults = list(func.func_defaults)
else:
func_defaults = []
co_argcount = func.func_code.co_argcount
co_varnames = func.func_code.co_varnames
co_flags = func.func_code.co_flags
CO_FLAGS_ARGS = 4
CO_FLAGS_KWARGS = 8
# Adjust argcount for possible *args and **kwargs arguments.
argcount = co_argcount
if co_flags & CO_FLAGS_ARGS: argcount += 1
if co_flags & CO_FLAGS_KWARGS: argcount += 1
# Determine the usage string.
usage = "%s %s" % (self.name, cmdname)
if argcount <= 2: # handler ::= do_FOO(self, argv)
usage += " [ARGS...]"
elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
argnames = list(co_varnames[3:argcount])
tail = ""
if co_flags & CO_FLAGS_KWARGS:
name = argnames.pop(-1)
import warnings
# There is no generally accepted mechanism for passing
# keyword arguments from the command line. Could
# *perhaps* consider: arg=value arg2=value2 ...
warnings.warn("argument '**%s' on '%s.%s' command "
"handler will never get values"
% (name, self.__class__.__name__,
func.func_name))
if co_flags & CO_FLAGS_ARGS:
name = argnames.pop(-1)
tail = "[%s...]" % name.upper()
while func_defaults:
func_defaults.pop(-1)
name = argnames.pop(-1)
tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
while argnames:
name = argnames.pop(-1)
tail = "%s %s" % (name.upper(), tail)
usage += ' ' + tail
block_lines = [
self.helpindent + "Usage:",
self.helpindent + ' '*4 + usage
]
block = '\n'.join(block_lines) + '\n\n'
help = help.replace(indent+marker+suffix, block, 1)
return help
#TODO: this only makes sense as part of the Cmdln class.
# Add hooks to add help preprocessing template vars and put
# this one on that class.
def _help_preprocess_cmd_option_list(self, help, cmdname=None):
marker = "${cmd_option_list}"
handler = self._get_cmd_handler(cmdname)
if not handler:
raise CmdlnError("cannot preprocess '%s' into help string: "
"could not find command handler for %r"
% (marker, cmdname))
indent, indent_width = _get_indent(marker, help)
suffix = _get_trailing_whitespace(marker, help)
if hasattr(handler, "optparser"):
# Setup formatting options and format.
# - Indentation of 4 is better than optparse default of 2.
# C.f. Damian Conway's discussion of this in Perl Best
# Practices.
handler.optparser.formatter.indent_increment = 4
handler.optparser.formatter.current_indent = indent_width
block = handler.optparser.format_option_help() + '\n'
else:
block = ""
help = help.replace(indent+marker+suffix, block, 1)
return help
def _get_canonical_cmd_name(self, token):
map = self._get_canonical_map()
return map.get(token, None)
def _get_canonical_map(self):
"""Return a mapping of available command names and aliases to
their canonical command name.
"""
cacheattr = "_token2canonical"
if not hasattr(self, cacheattr):
# Get the list of commands and their aliases, if any.
token2canonical = {}
cmd2funcname = {} # use a dict to strip duplicates
for attr in self.get_names():
if attr.startswith("do_"): cmdname = attr[3:]
elif attr.startswith("_do_"): cmdname = attr[4:]
else:
continue
cmd2funcname[cmdname] = attr
token2canonical[cmdname] = cmdname
for cmdname, funcname in cmd2funcname.items(): # add aliases
func = getattr(self, funcname)
aliases = getattr(func, "aliases", [])
for alias in aliases:
if alias in cmd2funcname:
import warnings
warnings.warn("'%s' alias for '%s' command conflicts "
"with '%s' handler"
% (alias, cmdname, cmd2funcname[alias]))
continue
token2canonical[alias] = cmdname
setattr(self, cacheattr, token2canonical)
return getattr(self, cacheattr)
def _get_cmd_handler(self, cmdname):
handler = None
try:
handler = getattr(self, 'do_' + cmdname)
except AttributeError:
try:
# Private command handlers begin with "_do_".
handler = getattr(self, '_do_' + cmdname)
except AttributeError:
pass
return handler
def _do_EOF(self, argv):
# Default EOF handler
# Note: an actual EOF is redirected to this command.
#TODO: separate name for this. Currently it is available from
# command-line. Is that okay?
self.stdout.write('\n')
self.stdout.flush()
self.stop = True
def emptyline(self):
# Different from cmd.Cmd: don't repeat the last command for an
# emptyline.
if self.cmdlooping:
pass
else:
return self.do_help(["help"])
#---- optparse.py extension to fix (IMO) some deficiencies
#
# See the class _OptionParserEx docstring for details.
#
class StopOptionProcessing(Exception):
"""Indicate that option *and argument* processing should stop
cleanly. This is not an error condition. It is similar in spirit to
StopIteration. This is raised by _OptionParserEx's default "help"
and "version" option actions and can be raised by custom option
callbacks too.
Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
usage is:
parser = CmdlnOptionParser(mycmd)
parser.add_option("-f", "--force", dest="force")
...
try:
opts, args = parser.parse_args()
except StopOptionProcessing:
# normal termination, "--help" was probably given
sys.exit(0)
"""
class _OptionParserEx(optparse.OptionParser):
"""An optparse.OptionParser that uses exceptions instead of sys.exit.
This class is an extension of optparse.OptionParser that differs
as follows:
- Correct (IMO) the default OptionParser error handling to never
sys.exit(). Instead OptParseError exceptions are passed through.
- Add the StopOptionProcessing exception (a la StopIteration) to
indicate normal termination of option processing.
See StopOptionProcessing's docstring for details.
I'd also like to see the following in the core optparse.py, perhaps
as a RawOptionParser which would serve as a base class for the more
    generally used OptionParser (that works as it currently does):
- Remove the implicit addition of the -h|--help and --version
options. They can get in the way (e.g. if want '-?' and '-V' for
these as well) and it is not hard to do:
optparser.add_option("-h", "--help", action="help")
optparser.add_option("--version", action="version")
These are good practices, just not valid defaults if they can
get in the way.
"""
def error(self, msg):
raise optparse.OptParseError(msg)
def exit(self, status=0, msg=None):
if status == 0:
raise StopOptionProcessing(msg)
else:
#TODO: don't lose status info here
raise optparse.OptParseError(msg)
#---- optparse.py-based option processing support
class CmdlnOptionParser(_OptionParserEx):
"""An optparse.OptionParser class more appropriate for top-level
Cmdln options. For parsing of sub-command options, see
SubCmdOptionParser.
Changes:
- disable_interspersed_args() by default, because a Cmdln instance
has sub-commands which may themselves have options.
- Redirect print_help() to the Cmdln.do_help() which is better
      equipped to handle the "help" action.
- error() will raise a CmdlnUserError: OptionParse.error() is meant
to be called for user errors. Raising a well-known error here can
make error handling clearer.
- Also see the changes in _OptionParserEx.
"""
def __init__(self, cmdln, **kwargs):
self.cmdln = cmdln
kwargs["prog"] = self.cmdln.name
_OptionParserEx.__init__(self, **kwargs)
self.disable_interspersed_args()
def print_help(self, file=None):
self.cmdln.onecmd(["help"])
def error(self, msg):
raise CmdlnUserError(msg)
class SubCmdOptionParser(_OptionParserEx):
def set_cmdln_info(self, cmdln, subcmd):
"""Called by Cmdln to pass relevant info about itself needed
for print_help().
"""
self.cmdln = cmdln
self.subcmd = subcmd
def print_help(self, file=None):
self.cmdln.onecmd(["help", self.subcmd])
def error(self, msg):
raise CmdlnUserError(msg)
def option(*args, **kwargs):
"""Decorator to add an option to the optparser argument of a Cmdln
subcommand.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#...
"""
#XXX Is there a possible optimization for many options to not have a
# large stack depth here?
def decorate(f):
if not hasattr(f, "optparser"):
f.optparser = SubCmdOptionParser()
f.optparser.add_option(*args, **kwargs)
return f
return decorate
class Cmdln(RawCmdln):
"""An improved (on cmd.Cmd) framework for building multi-subcommand
scripts (think "svn" & "cvs") and simple shells (think "pdb" and
"gdb").
A simple example:
import cmdln
class MySVN(cmdln.Cmdln):
name = "svn"
@cmdln.aliases('stat', 'st')
            @cmdln.option('-v', '--verbose', action='store_true',
help='print verbose information')
def do_status(self, subcmd, opts, *paths):
print "handle 'svn status' command"
#...
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
'Cmdln' extends 'RawCmdln' by providing optparse option processing
integration. See this class' _dispatch_cmd() docstring and
<http://trentm.com/projects/cmdln> for more information.
"""
def _dispatch_cmd(self, handler, argv):
"""Introspect sub-command handler signature to determine how to
dispatch the command. The raw handler provided by the base
'RawCmdln' class is still supported:
def do_foo(self, argv):
# 'argv' is the vector of command line args, argv[0] is
# the command name itself (i.e. "foo" or an alias)
pass
In addition, if the handler has more than 2 arguments option
processing is automatically done (using optparse):
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar(self, subcmd, opts, *args):
# subcmd = <"bar" or an alias>
# opts = <an optparse.Values instance>
if opts.verbose:
print "lots of debugging output..."
# args = <tuple of arguments>
for arg in args:
bar(arg)
TODO: explain that "*args" can be other signatures as well.
The `cmdln.option` decorator corresponds to an `add_option()`
method call on an `optparse.OptionParser` instance.
You can declare a specific number of arguments:
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar2(self, subcmd, opts, bar_one, bar_two):
#...
and an appropriate error message will be raised/printed if the
command is called with a different number of args.
"""
co_argcount = handler.im_func.func_code.co_argcount
if co_argcount == 2: # handler ::= do_foo(self, argv)
return handler(argv)
elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
try:
optparser = handler.optparser
except AttributeError:
optparser = handler.im_func.optparser = SubCmdOptionParser()
assert isinstance(optparser, SubCmdOptionParser)
optparser.set_cmdln_info(self, argv[0])
try:
opts, args = optparser.parse_args(argv[1:])
except StopOptionProcessing:
#TODO: this doesn't really fly for a replacement of
# optparse.py behaviour, does it?
return 0 # Normal command termination
try:
return handler(argv[0], opts, *args)
except TypeError, ex:
# Some TypeError's are user errors:
# do_foo() takes at least 4 arguments (3 given)
# do_foo() takes at most 5 arguments (6 given)
# do_foo() takes exactly 5 arguments (6 given)
# Raise CmdlnUserError for these with a suitably
# massaged error message.
import sys
tb = sys.exc_info()[2] # the traceback object
if tb.tb_next is not None:
# If the traceback is more than one level deep, then the
                    # TypeError did *not* happen on the "handler(...)" call
                    # above. In that case we don't want to handle it specially
# here: it would falsely mask deeper code errors.
raise
msg = ex.args[0]
match = _INCORRECT_NUM_ARGS_RE.search(msg)
if match:
msg = list(match.groups())
msg[1] = int(msg[1]) - 3
if msg[1] == 1:
msg[2] = msg[2].replace("arguments", "argument")
msg[3] = int(msg[3]) - 3
msg = ''.join(map(str, msg))
raise CmdlnUserError(msg)
else:
raise
else:
raise CmdlnError("incorrect argcount for %s(): takes %d, must "
"take 2 for 'argv' signature or 3+ for 'opts' "
"signature" % (handler.__name__, co_argcount))
#---- internal support functions
def _format_linedata(linedata, indent, indent_width):
"""Format specific linedata into a pleasant layout.
"linedata" is a list of 2-tuples of the form:
(<item-display-string>, <item-docstring>)
"indent" is a string to use for one level of indentation
"indent_width" is a number of columns by which the
formatted data will be indented when printed.
    The <item-display-string> column is held to 13 or 16 columns, depending
    on the length of the longest item name.
"""
lines = []
WIDTH = 78 - indent_width
SPACING = 2
NAME_WIDTH_LOWER_BOUND = 13
NAME_WIDTH_UPPER_BOUND = 16
NAME_WIDTH = max([len(s) for s,d in linedata])
if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
else:
NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
for namestr, doc in linedata:
line = indent + namestr
if len(namestr) <= NAME_WIDTH:
line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
else:
lines.append(line)
line = indent + ' ' * (NAME_WIDTH + SPACING)
line += _summarize_doc(doc, DOC_WIDTH)
lines.append(line.rstrip())
return lines
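# Illustrative sketch (hypothetical data, not from the original module):
#   _format_linedata([("status (st)", "show working tree status"),
#                     ("commit", "record changes")], ' '*4, 4)
# returns lines roughly like:
#   "    status (st)    show working tree status"
#   "    commit         record changes"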
def _summarize_doc(doc, length=60):
r"""Parse out a short one line summary from the given doclines.
"doc" is the doc string to summarize.
"length" is the max length for the summary
>>> _summarize_doc("this function does this")
'this function does this'
>>> _summarize_doc("this function does this", 10)
'this fu...'
>>> _summarize_doc("this function does this\nand that")
'this function does this and that'
>>> _summarize_doc("this function does this\n\nand that")
'this function does this'
"""
import re
if doc is None:
return ""
assert length > 3, "length <= 3 is absurdly short for a doc summary"
doclines = doc.strip().splitlines(0)
if not doclines:
return ""
summlines = []
for i, line in enumerate(doclines):
stripped = line.strip()
if not stripped:
break
summlines.append(stripped)
if len(''.join(summlines)) >= length:
break
summary = ' '.join(summlines)
if len(summary) > length:
summary = summary[:length-3] + "..."
return summary
def line2argv(line):
r"""Parse the given line into an argument vector.
"line" is the line of input to parse.
This may get niggly when dealing with quoting and escaping. The
current state of this parsing may not be completely thorough/correct
in this respect.
>>> from cmdln import line2argv
>>> line2argv("foo")
['foo']
>>> line2argv("foo bar")
['foo', 'bar']
>>> line2argv("foo bar ")
['foo', 'bar']
>>> line2argv(" foo bar")
['foo', 'bar']
Quote handling:
>>> line2argv("'foo bar'")
['foo bar']
>>> line2argv('"foo bar"')
['foo bar']
>>> line2argv(r'"foo\"bar"')
['foo"bar']
>>> line2argv("'foo bar' spam")
['foo bar', 'spam']
>>> line2argv("'foo 'bar spam")
['foo bar', 'spam']
>>> line2argv('some\tsimple\ttests')
['some', 'simple', 'tests']
>>> line2argv('a "more complex" test')
['a', 'more complex', 'test']
>>> line2argv('a more="complex test of " quotes')
['a', 'more=complex test of ', 'quotes']
>>> line2argv('a more" complex test of " quotes')
['a', 'more complex test of ', 'quotes']
>>> line2argv('an "embedded \\"quote\\""')
['an', 'embedded "quote"']
# Komodo bug 48027
>>> line2argv('foo bar C:\\')
['foo', 'bar', 'C:\\']
# Komodo change 127581
>>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
['\\test\\slash', 'foo bar', 'foo"bar']
# Komodo change 127629
>>> if sys.platform == "win32":
... line2argv(r'\foo\bar') == ['\\foo\\bar']
... line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
... line2argv('"foo') == ['foo']
... else:
... line2argv(r'\foo\bar') == ['foobar']
... line2argv(r'\\foo\\bar') == ['\\foo\\bar']
... try:
... line2argv('"foo')
... except ValueError, ex:
... "not terminated" in str(ex)
True
True
True
"""
import string
line = line.strip()
argv = []
state = "default"
arg = None # the current argument being parsed
i = -1
while 1:
i += 1
if i >= len(line): break
ch = line[i]
if ch == "\\" and i+1 < len(line):
# escaped char always added to arg, regardless of state
if arg is None: arg = ""
if (sys.platform == "win32"
or state in ("double-quoted", "single-quoted")
) and line[i+1] not in tuple('"\''):
arg += ch
i += 1
arg += line[i]
continue
if state == "single-quoted":
if ch == "'":
state = "default"
else:
arg += ch
elif state == "double-quoted":
if ch == '"':
state = "default"
else:
arg += ch
elif state == "default":
if ch == '"':
if arg is None: arg = ""
state = "double-quoted"
elif ch == "'":
if arg is None: arg = ""
state = "single-quoted"
elif ch in string.whitespace:
if arg is not None:
argv.append(arg)
arg = None
else:
if arg is None: arg = ""
arg += ch
if arg is not None:
argv.append(arg)
if not sys.platform == "win32" and state != "default":
raise ValueError("command line is not terminated: unfinished %s "
"segment" % state)
return argv
def argv2line(argv):
r"""Put together the given argument vector into a command line.
"argv" is the argument vector to process.
>>> from cmdln import argv2line
>>> argv2line(['foo'])
'foo'
>>> argv2line(['foo', 'bar'])
'foo bar'
>>> argv2line(['foo', 'bar baz'])
'foo "bar baz"'
>>> argv2line(['foo"bar'])
'foo"bar'
>>> print argv2line(['foo" bar'])
'foo" bar'
>>> print argv2line(["foo' bar"])
"foo' bar"
>>> argv2line(["foo'bar"])
"foo'bar"
"""
escapedArgs = []
for arg in argv:
if ' ' in arg and '"' not in arg:
arg = '"'+arg+'"'
elif ' ' in arg and "'" not in arg:
arg = "'"+arg+"'"
elif ' ' in arg:
arg = arg.replace('"', r'\"')
arg = '"'+arg+'"'
escapedArgs.append(arg)
return ' '.join(escapedArgs)
# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line)
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
                continue # ignore the CR/LF characters themselves
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print "dedent: margin=%r" % margin
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print "dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin)
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
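# Example (a sketch): _dedent("  foo\n    bar\n") -> "foo\n  bar\n",
# i.e. the common 2-space margin is stripped from every line.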
def _get_indent(marker, s, tab_width=8):
"""_get_indent(marker, s, tab_width=8) ->
(<indentation-of-'marker'>, <indentation-width>)"""
# Figure out how much the marker is indented.
INDENT_CHARS = tuple(' \t')
start = s.index(marker)
i = start
while i > 0:
if s[i-1] not in INDENT_CHARS:
break
i -= 1
indent = s[i:start]
indent_width = 0
for ch in indent:
if ch == ' ':
indent_width += 1
elif ch == '\t':
indent_width += tab_width - (indent_width % tab_width)
return indent, indent_width
def _get_trailing_whitespace(marker, s):
"""Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline.
"""
suffix = ''
start = s.index(marker) + len(marker)
i = start
while i < len(s):
if s[i] in ' \t':
suffix += s[i]
elif s[i] in '\r\n':
suffix += s[i]
if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n':
suffix += s[i+1]
break
else:
break
i += 1
return suffix
#---- bash completion support
# Note: This is still experimental. I expect to change this
# significantly.
#
# To get Bash completion for a cmdln.Cmdln class, run the following
# bash command:
# $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname
# For example:
# $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn
#
#TODO: Simplify the above so don't have to given path to script (try to
# find it on PATH, if possible). Could also make class name
# optional if there is only one in the module (common case).
if __name__ == "__main__" and len(sys.argv) == 6:
def _log(s):
        return # no-op; comment this line out to enable the debug logging below
from os.path import expanduser
fout = open(expanduser("~/tmp/bashcpln.log"), 'a')
fout.write(str(s) + '\n')
fout.close()
# Recipe: module_from_path (1.0.1+)
def _module_from_path(path):
import imp, os, sys
path = os.path.expanduser(path)
dir = os.path.dirname(path) or os.curdir
name = os.path.splitext(os.path.basename(path))[0]
sys.path.insert(0, dir)
try:
iinfo = imp.find_module(name, [dir])
return imp.load_module(name, *iinfo)
finally:
sys.path.remove(dir)
def _get_bash_cplns(script_path, class_name, cmd_name,
token, preceding_token):
_log('--')
_log('get_cplns(%r, %r, %r, %r, %r)'
% (script_path, class_name, cmd_name, token, preceding_token))
comp_line = os.environ["COMP_LINE"]
comp_point = int(os.environ["COMP_POINT"])
_log("COMP_LINE: %r" % comp_line)
_log("COMP_POINT: %r" % comp_point)
try:
script = _module_from_path(script_path)
except ImportError, ex:
_log("error importing `%s': %s" % (script_path, ex))
return []
shell = getattr(script, class_name)()
cmd_map = shell._get_canonical_map()
del cmd_map["EOF"]
# Determine if completing the sub-command name.
parts = comp_line[:comp_point].split(None, 1)
_log(parts)
if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]):
#TODO: if parts[1].startswith('-'): handle top-level opts
_log("complete sub-command names")
matches = {}
for name, canon_name in cmd_map.items():
if name.startswith(token):
matches[name] = canon_name
if not matches:
return []
elif len(matches) == 1:
return matches.keys()
elif len(set(matches.values())) == 1:
return [matches.values()[0]]
else:
return matches.keys()
# Otherwise, complete options for the given sub-command.
#TODO: refine this so it does the right thing with option args
if token.startswith('-'):
cmd_name = comp_line.split(None, 2)[1]
try:
cmd_canon_name = cmd_map[cmd_name]
except KeyError:
return []
handler = shell._get_cmd_handler(cmd_canon_name)
optparser = getattr(handler, "optparser", None)
if optparser is None:
optparser = SubCmdOptionParser()
opt_strs = []
for option in optparser.option_list:
for opt_str in option._short_opts + option._long_opts:
if opt_str.startswith(token):
opt_strs.append(opt_str)
return opt_strs
return []
for cpln in _get_bash_cplns(*sys.argv[1:]):
print cpln
########NEW FILE########
__FILENAME__ = errors
#!/usr/bin/python -tt
#
# Copyright (c) 2007 Red Hat, Inc.
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class CreatorError(Exception):
"""An exception base class for all imgcreate errors."""
keyword = '<creator>'
def __init__(self, msg):
self.msg = msg
def __str__(self):
if isinstance(self.msg, unicode):
self.msg = self.msg.encode('utf-8', 'ignore')
else:
self.msg = str(self.msg)
return self.keyword + self.msg
class Usage(CreatorError):
keyword = '<usage>'
def __str__(self):
if isinstance(self.msg, unicode):
self.msg = self.msg.encode('utf-8', 'ignore')
else:
self.msg = str(self.msg)
return self.keyword + self.msg + ', please use "--help" for more info'
class Abort(CreatorError):
keyword = ''
class ConfigError(CreatorError):
keyword = '<config>'
class KsError(CreatorError):
keyword = '<kickstart>'
class RepoError(CreatorError):
keyword = '<repo>'
class RpmError(CreatorError):
keyword = '<rpm>'
class MountError(CreatorError):
keyword = '<mount>'
class SnapshotError(CreatorError):
keyword = '<snapshot>'
class SquashfsError(CreatorError):
keyword = '<squashfs>'
class BootstrapError(CreatorError):
keyword = '<bootstrap>'
class RuntimeError(CreatorError):
keyword = '<runtime>'
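# Illustrative usage (a sketch, not part of the original module):
#   try:
#       raise ConfigError("missing 'bootloader' setting")
#   except CreatorError, err:
#       print str(err)   # -> "<config>missing 'bootloader' setting"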
########NEW FILE########
__FILENAME__ = Fiemap
""" This module implements python API for the FIEMAP ioctl. The FIEMAP ioctl
allows to find holes and mapped areas in a file. """
# Note, a lot of code in this module is not very readable, because it deals
# with the rather complex FIEMAP ioctl. To understand the code, you need to
# know the FIEMAP interface, which is documented in the
# Documentation/filesystems/fiemap.txt file in the Linux kernel sources.
# Disable the following pylint recommendations:
# * Too many instance attributes (R0902)
# pylint: disable=R0902
import os
import struct
import array
import fcntl
from mic.utils.misc import get_block_size
# Format string for 'struct fiemap'
_FIEMAP_FORMAT = "=QQLLLL"
# sizeof(struct fiemap)
_FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT)
# Format string for 'struct fiemap_extent'
_FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL"
# sizeof(struct fiemap_extent)
_FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT)
# The FIEMAP ioctl number
_FIEMAP_IOCTL = 0xC020660B
# Minimum buffer which is required for 'class Fiemap' to operate
MIN_BUFFER_SIZE = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE
# The default buffer size for 'class Fiemap'
DEFAULT_BUFFER_SIZE = 256 * 1024
class Error(Exception):
""" A class for exceptions generated by this module. We currently support
only one type of exceptions, and we basically throw human-readable problem
description in case of errors. """
pass
class Fiemap:
""" This class provides API to the FIEMAP ioctl. Namely, it allows to
iterate over all mapped blocks and over all holes. """
def _open_image_file(self):
""" Open the image file. """
try:
self._f_image = open(self._image_path, 'rb')
except IOError as err:
raise Error("cannot open image file '%s': %s" \
% (self._image_path, err))
self._f_image_needs_close = True
def __init__(self, image, buf_size = DEFAULT_BUFFER_SIZE):
""" Initialize a class instance. The 'image' argument is full path to
the file to operate on, or a file object to operate on.
The 'buf_size' argument is the size of the buffer for 'struct
fiemap_extent' elements which will be used when invoking the FIEMAP
ioctl. The larger is the buffer, the less times the FIEMAP ioctl will
be invoked. """
self._f_image_needs_close = False
if hasattr(image, "fileno"):
self._f_image = image
self._image_path = image.name
else:
self._image_path = image
self._open_image_file()
# Validate 'buf_size'
if buf_size < MIN_BUFFER_SIZE:
raise Error("too small buffer (%d bytes), minimum is %d bytes" \
% (buf_size, MIN_BUFFER_SIZE))
# How many 'struct fiemap_extent' elements fit the buffer
buf_size -= _FIEMAP_SIZE
self._fiemap_extent_cnt = buf_size / _FIEMAP_EXTENT_SIZE
self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
self._buf_size += _FIEMAP_SIZE
# Allocate a mutable buffer for the FIEMAP ioctl
self._buf = array.array('B', [0] * self._buf_size)
self.image_size = os.fstat(self._f_image.fileno()).st_size
try:
self.block_size = get_block_size(self._f_image)
except IOError as err:
raise Error("cannot get block size for '%s': %s" \
% (self._image_path, err))
self.blocks_cnt = self.image_size + self.block_size - 1
self.blocks_cnt /= self.block_size
# Synchronize the image file to make sure FIEMAP returns correct values
try:
self._f_image.flush()
except IOError as err:
raise Error("cannot flush image file '%s': %s" \
% (self._image_path, err))
try:
            os.fsync(self._f_image.fileno())
except OSError as err:
raise Error("cannot synchronize image file '%s': %s " \
% (self._image_path, err.strerror))
# Check if the FIEMAP ioctl is supported
self.block_is_mapped(0)
def __del__(self):
""" The class destructor which closes the opened files. """
if self._f_image_needs_close:
self._f_image.close()
def _invoke_fiemap(self, block, count):
""" Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
block number 'block'.
The full result of the operation is stored in 'self._buf' on exit.
        Returns the unpacked 'struct fiemap' data structure in the form of
        a python tuple (just like 'struct.unpack()'). """
if block < 0 or block >= self.blocks_cnt:
raise Error("bad block number %d, should be within [0, %d]" \
% (block, self.blocks_cnt))
# Initialize the 'struct fiemap' part of the buffer
struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
count * self.block_size, 0, 0,
self._fiemap_extent_cnt, 0)
try:
fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
except IOError as err:
error_msg = "the FIEMAP ioctl failed for '%s': %s" \
% (self._image_path, err)
if err.errno == os.errno.EPERM or err.errno == os.errno.EACCES:
# The FIEMAP ioctl was added in kernel version 2.6.28 in 2008
error_msg += " (looks like your kernel does not support FIEMAP)"
raise Error(error_msg)
return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])
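    # Sketch (field order assumed per Documentation/filesystems/fiemap.txt)
    # of unpacking the 6-tuple returned above:
    #   fm_start, fm_length, fm_flags, fm_mapped_extents, \
    #       fm_extent_count, fm_reserved = self._invoke_fiemap(block, count)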
def block_is_mapped(self, block):
""" This function returns 'True' if block number 'block' of the image
file is mapped and 'False' otherwise. """
struct_fiemap = self._invoke_fiemap(block, 1)
# The 3rd element of 'struct_fiemap' is the 'fm_mapped_extents' field.
# If it contains zero, the block is not mapped, otherwise it is
# mapped.
return bool(struct_fiemap[3])
def block_is_unmapped(self, block):
""" This function returns 'True' if block number 'block' of the image
file is not mapped (hole) and 'False' otherwise. """
return not self.block_is_mapped(block)
def _unpack_fiemap_extent(self, index):
""" Unpack a 'struct fiemap_extent' structure object number 'index'
from the internal 'self._buf' buffer. """
offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
return struct.unpack(_FIEMAP_EXTENT_FORMAT,
self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])
def _do_get_mapped_ranges(self, start, count):
""" Implements most the functionality for the 'get_mapped_ranges()'
generator: invokes the FIEMAP ioctl, walks through the mapped
extents and yields mapped block ranges. However, the ranges may be
consecutive (e.g., (1, 100), (100, 200)) and 'get_mapped_ranges()'
simply merges them. """
block = start
while block < start + count:
struct_fiemap = self._invoke_fiemap(block, count)
mapped_extents = struct_fiemap[3]
if mapped_extents == 0:
# No more mapped blocks
return
extent = 0
while extent < mapped_extents:
fiemap_extent = self._unpack_fiemap_extent(extent)
# Start of the extent
extent_start = fiemap_extent[0]
# Starting block number of the extent
extent_block = extent_start / self.block_size
# Length of the extent
extent_len = fiemap_extent[2]
# Count of blocks in the extent
extent_count = extent_len / self.block_size
# Extent length and offset have to be block-aligned
assert extent_start % self.block_size == 0
assert extent_len % self.block_size == 0
if extent_block > start + count - 1:
return
first = max(extent_block, block)
last = min(extent_block + extent_count, start + count) - 1
yield (first, last)
extent += 1
block = extent_block + extent_count
def get_mapped_ranges(self, start, count):
""" A generator which yields ranges of mapped blocks in the file. The
ranges are tuples of 2 elements: [first, last], where 'first' is the
first mapped block and 'last' is the last mapped block.
The ranges are yielded for the area of the file of size 'count' blocks,
starting from block 'start'. """
iterator = self._do_get_mapped_ranges(start, count)
first_prev, last_prev = iterator.next()
for first, last in iterator:
if last_prev == first - 1:
last_prev = last
else:
yield (first_prev, last_prev)
first_prev, last_prev = first, last
yield (first_prev, last_prev)
def get_unmapped_ranges(self, start, count):
""" Just like 'get_mapped_ranges()', but yields unmapped block ranges
instead (holes). """
hole_first = start
for first, last in self._do_get_mapped_ranges(start, count):
if first > hole_first:
yield (hole_first, first - 1)
hole_first = last + 1
if hole_first < start + count:
yield (hole_first, start + count - 1)
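# Illustrative usage (hypothetical image path, a sketch only):
#   fiemap = Fiemap("/tmp/disk.img")
#   for first, last in fiemap.get_mapped_ranges(0, fiemap.blocks_cnt):
#       print "mapped blocks [%d, %d]" % (first, last)
#   for first, last in fiemap.get_unmapped_ranges(0, fiemap.blocks_cnt):
#       print "hole [%d, %d]" % (first, last)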
########NEW FILE########
__FILENAME__ = fs_related
#!/usr/bin/python -tt
#
# Copyright (c) 2007, Red Hat, Inc.
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import os
import sys
import errno
import stat
import random
import string
import time
import uuid
from mic import msger
from mic.utils import runner
from mic.utils.errors import *
from mic.utils.oe.misc import *
def find_binary_inchroot(binary, chroot):
paths = ["/usr/sbin",
"/usr/bin",
"/sbin",
"/bin"
]
for path in paths:
bin_path = "%s/%s" % (path, binary)
if os.path.exists("%s/%s" % (chroot, bin_path)):
return bin_path
return None
def find_binary_path(binary):
if os.environ.has_key("PATH"):
paths = os.environ["PATH"].split(":")
else:
paths = []
if os.environ.has_key("HOME"):
paths += [os.environ["HOME"] + "/bin"]
paths += ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"]
for path in paths:
bin_path = "%s/%s" % (path, binary)
if os.path.exists(bin_path):
return bin_path
print "External command '%s' not found, exiting." % binary
print " (Please install '%s' on your host system)" % binary
sys.exit(1)
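# Example (a sketch): find_binary_path("mount") returns e.g. "/bin/mount";
# note that it exits the whole process if the binary cannot be located.
#   mountcmd = find_binary_path("mount")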
def makedirs(dirname):
"""A version of os.makedirs() that doesn't throw an
exception if the leaf directory already exists.
"""
try:
os.makedirs(dirname)
except OSError, err:
if err.errno != errno.EEXIST:
raise
def mksquashfs(in_img, out_img):
fullpathmksquashfs = find_binary_path("mksquashfs")
args = [fullpathmksquashfs, in_img, out_img]
if not sys.stdout.isatty():
args.append("-no-progress")
ret = runner.show(args)
if ret != 0:
raise SquashfsError("'%s' exited with error (%d)" % (' '.join(args), ret))
def resize2fs(fs, size):
resize2fs = find_binary_path("resize2fs")
if size == 0:
        # a size of 0 means minimize the filesystem
return runner.show([resize2fs, '-M', fs])
else:
return runner.show([resize2fs, fs, "%sK" % (size / 1024,)])
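# Example (a sketch, hypothetical path):
#   resize2fs("/tmp/rootfs.img", 0)          # shrink to the minimal size
#   resize2fs("/tmp/rootfs.img", 1 << 30)    # resize to 1 GiB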
def my_fuser(fp):
fuser = find_binary_path("fuser")
if not os.path.exists(fp):
return False
rc = runner.quiet([fuser, "-s", fp])
if rc == 0:
for pid in runner.outs([fuser, fp]).split():
fd = open("/proc/%s/cmdline" % pid, "r")
cmdline = fd.read()
fd.close()
if cmdline[:-1] == "/bin/bash":
return True
# not found
return False
class BindChrootMount:
"""Represents a bind mount of a directory into a chroot."""
def __init__(self, src, chroot, dest = None, option = None):
self.root = os.path.abspath(os.path.expanduser(chroot))
self.option = option
self.orig_src = self.src = src
if os.path.islink(src):
self.src = os.readlink(src)
if not self.src.startswith('/'):
self.src = os.path.abspath(os.path.join(os.path.dirname(src),
self.src))
if not dest:
dest = self.src
self.dest = os.path.join(self.root, dest.lstrip('/'))
self.mounted = False
self.mountcmd = find_binary_path("mount")
self.umountcmd = find_binary_path("umount")
def ismounted(self):
with open('/proc/mounts') as f:
for line in f:
if line.split()[1] == os.path.abspath(self.dest):
return True
return False
def has_chroot_instance(self):
lock = os.path.join(self.root, ".chroot.lock")
return my_fuser(lock)
def mount(self):
if self.mounted or self.ismounted():
return
makedirs(self.dest)
rc = runner.show([self.mountcmd, "--bind", self.src, self.dest])
if rc != 0:
raise MountError("Bind-mounting '%s' to '%s' failed" %
(self.src, self.dest))
if self.option:
rc = runner.show([self.mountcmd, "--bind", "-o", "remount,%s" % self.option, self.dest])
if rc != 0:
raise MountError("Bind-remounting '%s' failed" % self.dest)
self.mounted = True
if os.path.islink(self.orig_src):
dest = os.path.join(self.root, self.orig_src.lstrip('/'))
if not os.path.exists(dest):
os.symlink(self.src, dest)
def unmount(self):
if self.has_chroot_instance():
return
if self.ismounted():
runner.show([self.umountcmd, "-l", self.dest])
self.mounted = False
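# Illustrative usage of BindChrootMount (hypothetical paths, a sketch only):
#   bm = BindChrootMount("/proc", "/var/tmp/mychroot")
#   bm.mount()
#   ...           # work inside the chroot
#   bm.unmount()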
class LoopbackMount:
"""LoopbackMount compatibility layer for old API"""
def __init__(self, lofile, mountdir, fstype = None):
self.diskmount = DiskMount(LoopbackDisk(lofile,size = 0),mountdir,fstype,rmmountdir = True)
self.losetup = False
self.losetupcmd = find_binary_path("losetup")
def cleanup(self):
self.diskmount.cleanup()
def unmount(self):
self.diskmount.unmount()
def lounsetup(self):
if self.losetup:
runner.show([self.losetupcmd, "-d", self.loopdev])
self.losetup = False
self.loopdev = None
def loopsetup(self):
if self.losetup:
return
self.loopdev = get_loop_device(self.losetupcmd, self.lofile)
self.losetup = True
def mount(self):
self.diskmount.mount()
class SparseLoopbackMount(LoopbackMount):
"""SparseLoopbackMount compatibility layer for old API"""
def __init__(self, lofile, mountdir, size, fstype = None):
self.diskmount = DiskMount(SparseLoopbackDisk(lofile,size),mountdir,fstype,rmmountdir = True)
def expand(self, create = False, size = None):
self.diskmount.disk.expand(create, size)
def truncate(self, size = None):
self.diskmount.disk.truncate(size)
def create(self):
self.diskmount.disk.create()
class SparseExtLoopbackMount(SparseLoopbackMount):
"""SparseExtLoopbackMount compatibility layer for old API"""
def __init__(self, lofile, mountdir, size, fstype, blocksize, fslabel):
self.diskmount = ExtDiskMount(SparseLoopbackDisk(lofile,size), mountdir, fstype, blocksize, fslabel, rmmountdir = True)
def __format_filesystem(self):
self.diskmount.__format_filesystem()
def create(self):
self.diskmount.disk.create()
def resize(self, size = None):
return self.diskmount.__resize_filesystem(size)
def mount(self):
self.diskmount.mount()
def __fsck(self):
self.extdiskmount.__fsck()
def __get_size_from_filesystem(self):
return self.diskmount.__get_size_from_filesystem()
def __resize_to_minimal(self):
return self.diskmount.__resize_to_minimal()
def resparse(self, size = None):
return self.diskmount.resparse(size)
class Disk:
"""Generic base object for a disk
The 'create' method must make the disk visible as a block device - eg
by calling losetup. For RawDisk, this is obviously a no-op. The 'cleanup'
method must undo the 'create' operation.
"""
def __init__(self, size, device = None):
self._device = device
self._size = size
def create(self):
pass
def cleanup(self):
pass
def get_device(self):
return self._device
def set_device(self, path):
self._device = path
device = property(get_device, set_device)
def get_size(self):
return self._size
size = property(get_size)
class RawDisk(Disk):
"""A Disk backed by a block device.
Note that create() is a no-op.
"""
def __init__(self, size, device):
Disk.__init__(self, size, device)
def fixed(self):
return True
def exists(self):
return True
class DiskImage(Disk):
"""
A Disk backed by a file.
"""
def __init__(self, image_file, size):
Disk.__init__(self, size)
self.image_file = image_file
def exists(self):
return os.path.exists(self.image_file)
def create(self):
if self.device is not None:
return
blocks = self.size / 1024
if self.size - blocks * 1024:
blocks += 1
# create disk image
dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=1" % \
(self.image_file, blocks)
rc, out = exec_cmd(dd_cmd)
self.device = self.image_file
class LoopbackDisk(Disk):
"""A Disk backed by a file via the loop module."""
def __init__(self, lofile, size):
Disk.__init__(self, size)
self.lofile = lofile
self.losetupcmd = find_binary_path("losetup")
def fixed(self):
return False
def exists(self):
return os.path.exists(self.lofile)
def create(self):
if self.device is not None:
return
self.device = get_loop_device(self.losetupcmd, self.lofile)
def cleanup(self):
if self.device is None:
return
msger.debug("Losetup remove %s" % self.device)
rc = runner.show([self.losetupcmd, "-d", self.device])
self.device = None
class SparseLoopbackDisk(LoopbackDisk):
"""A Disk backed by a sparse file via the loop module."""
def __init__(self, lofile, size):
LoopbackDisk.__init__(self, lofile, size)
def expand(self, create = False, size = None):
flags = os.O_WRONLY
if create:
flags |= os.O_CREAT
if not os.path.exists(self.lofile):
makedirs(os.path.dirname(self.lofile))
if size is None:
size = self.size
msger.debug("Extending sparse file %s to %d" % (self.lofile, size))
if create:
fd = os.open(self.lofile, flags, 0644)
else:
fd = os.open(self.lofile, flags)
if size <= 0:
size = 1
try:
os.ftruncate(fd, size)
except:
# may be limited by 2G in 32bit env
os.ftruncate(fd, 2**31L)
os.close(fd)
def truncate(self, size = None):
if size is None:
size = self.size
msger.debug("Truncating sparse file %s to %d" % (self.lofile, size))
fd = os.open(self.lofile, os.O_WRONLY)
os.ftruncate(fd, size)
os.close(fd)
def create(self):
self.expand(create = True)
LoopbackDisk.create(self)
class Mount:
"""A generic base class to deal with mounting things."""
def __init__(self, mountdir):
self.mountdir = mountdir
def cleanup(self):
self.unmount()
def mount(self, options = None):
pass
def unmount(self):
pass
class DiskMount(Mount):
"""A Mount object that handles mounting of a Disk."""
def __init__(self, disk, mountdir, fstype = None, rmmountdir = True):
Mount.__init__(self, mountdir)
self.disk = disk
self.fstype = fstype
self.rmmountdir = rmmountdir
self.mounted = False
self.rmdir = False
if fstype:
self.mkfscmd = find_binary_path("mkfs." + self.fstype)
else:
self.mkfscmd = None
self.mountcmd = find_binary_path("mount")
self.umountcmd = find_binary_path("umount")
def cleanup(self):
Mount.cleanup(self)
self.disk.cleanup()
def unmount(self):
if self.mounted:
msger.debug("Unmounting directory %s" % self.mountdir)
runner.quiet('sync') # sync the data on this mount point
rc = runner.show([self.umountcmd, "-l", self.mountdir])
if rc == 0:
self.mounted = False
else:
raise MountError("Failed to umount %s" % self.mountdir)
if self.rmdir and not self.mounted:
try:
os.rmdir(self.mountdir)
except OSError, e:
pass
self.rmdir = False
def __create(self):
self.disk.create()
def mount(self, options = None):
if self.mounted:
return
if not os.path.isdir(self.mountdir):
msger.debug("Creating mount point %s" % self.mountdir)
os.makedirs(self.mountdir)
self.rmdir = self.rmmountdir
self.__create()
msger.debug("Mounting %s at %s" % (self.disk.device, self.mountdir))
if options:
args = [ self.mountcmd, "-o", options, self.disk.device, self.mountdir ]
else:
args = [ self.mountcmd, self.disk.device, self.mountdir ]
if self.fstype:
args.extend(["-t", self.fstype])
rc = runner.show(args)
if rc != 0:
raise MountError("Failed to mount '%s' to '%s' with command '%s'. Retval: %s" %
(self.disk.device, self.mountdir, " ".join(args), rc))
self.mounted = True
class ExtDiskMount(DiskMount):
"""A DiskMount object that is able to format/resize ext[23] filesystems."""
def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
self.blocksize = blocksize
self.fslabel = fslabel.replace("/", "")
self.uuid = str(uuid.uuid4())
self.skipformat = skipformat
self.fsopts = fsopts
self.extopts = None
self.dumpe2fs = find_binary_path("dumpe2fs")
self.tune2fs = find_binary_path("tune2fs")
def __parse_field(self, output, field):
for line in output.split("\n"):
if line.startswith(field + ":"):
return line[len(field) + 1:].strip()
raise KeyError("Failed to find field '%s' in output" % field)
def __format_filesystem(self):
if self.skipformat:
msger.debug("Skip filesystem format.")
return
msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
cmdlist = [self.mkfscmd, "-F", "-L", self.fslabel, "-m", "1", "-b",
str(self.blocksize), "-U", self.uuid]
if self.extopts:
cmdlist.extend(self.extopts.split())
cmdlist.extend([self.disk.device])
rc, errout = runner.runtool(cmdlist, catch=2)
if rc != 0:
raise MountError("Error creating %s filesystem on disk %s:\n%s" %
(self.fstype, self.disk.device, errout))
if not self.extopts:
msger.debug("Tuning filesystem on %s" % self.disk.device)
runner.show([self.tune2fs, "-c0", "-i0", "-Odir_index", "-ouser_xattr,acl", self.disk.device])
def __resize_filesystem(self, size = None):
current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
if size is None:
size = self.disk.size
if size == current_size:
return
if size > current_size:
self.disk.expand(size)
self.__fsck()
resize2fs(self.disk.lofile, size)
return size
def __create(self):
resize = False
if not self.disk.fixed() and self.disk.exists():
resize = True
self.disk.create()
if resize:
self.__resize_filesystem()
else:
self.__format_filesystem()
def mount(self, options = None):
self.__create()
DiskMount.mount(self, options)
def __fsck(self):
msger.info("Checking filesystem %s" % self.disk.lofile)
runner.quiet(["/sbin/e2fsck", "-f", "-y", self.disk.lofile])
def __get_size_from_filesystem(self):
return int(self.__parse_field(runner.outs([self.dumpe2fs, '-h', self.disk.lofile]),
"Block count")) * self.blocksize
def __resize_to_minimal(self):
self.__fsck()
#
# Use a binary search to find the minimal size
# we can resize the image to
#
bot = 0
top = self.__get_size_from_filesystem()
while top != (bot + 1):
t = bot + ((top - bot) / 2)
if not resize2fs(self.disk.lofile, t):
top = t
else:
bot = t
return top
def resparse(self, size = None):
self.cleanup()
if size == 0:
minsize = 0
else:
minsize = self.__resize_to_minimal()
self.disk.truncate(minsize)
self.__resize_filesystem(size)
return minsize
class VfatDiskMount(DiskMount):
"""A DiskMount object that is able to format vfat/msdos filesystems."""
def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
self.blocksize = blocksize
self.fslabel = fslabel.replace("/", "")
rand1 = random.randint(0, 2**16 - 1)
rand2 = random.randint(0, 2**16 - 1)
self.uuid = "%04X-%04X" % (rand1, rand2)
self.skipformat = skipformat
self.fsopts = fsopts
self.fsckcmd = find_binary_path("fsck." + self.fstype)
def __format_filesystem(self):
if self.skipformat:
msger.debug("Skip filesystem format.")
return
msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
rc = runner.show([self.mkfscmd, "-n", self.fslabel,
"-i", self.uuid.replace("-", ""), self.disk.device])
if rc != 0:
raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
msger.verbose("Tuning filesystem on %s" % self.disk.device)
def __resize_filesystem(self, size = None):
current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
if size is None:
size = self.disk.size
if size == current_size:
return
if size > current_size:
self.disk.expand(size)
self.__fsck()
#resize2fs(self.disk.lofile, size)
return size
def __create(self):
resize = False
if not self.disk.fixed() and self.disk.exists():
resize = True
self.disk.create()
if resize:
self.__resize_filesystem()
else:
self.__format_filesystem()
def mount(self, options = None):
self.__create()
DiskMount.mount(self, options)
def __fsck(self):
msger.debug("Checking filesystem %s" % self.disk.lofile)
runner.show([self.fsckcmd, "-y", self.disk.lofile])
def __get_size_from_filesystem(self):
return self.disk.size
def __resize_to_minimal(self):
self.__fsck()
        #
        # vfat filesystems cannot be shrunk in place, so the minimal
        # size is simply the current filesystem size
        #
        top = self.__get_size_from_filesystem()
return top
def resparse(self, size = None):
self.cleanup()
minsize = self.__resize_to_minimal()
self.disk.truncate(minsize)
self.__resize_filesystem(size)
return minsize
class BtrfsDiskMount(DiskMount):
"""A DiskMount object that is able to format/resize btrfs filesystems."""
def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
self.__check_btrfs()
DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
self.blocksize = blocksize
self.fslabel = fslabel.replace("/", "")
self.uuid = None
self.skipformat = skipformat
self.fsopts = fsopts
self.blkidcmd = find_binary_path("blkid")
self.btrfsckcmd = find_binary_path("btrfsck")
def __check_btrfs(self):
found = False
""" Need to load btrfs module to mount it """
load_module("btrfs")
for line in open("/proc/filesystems").xreadlines():
if line.find("btrfs") > -1:
found = True
break
if not found:
raise MountError("Your system can't mount btrfs filesystem, please make sure your kernel has btrfs support and the module btrfs.ko has been loaded.")
# disable selinux, selinux will block write
if os.path.exists("/usr/sbin/setenforce"):
runner.show(["/usr/sbin/setenforce", "0"])
def __parse_field(self, output, field):
for line in output.split(" "):
if line.startswith(field + "="):
return line[len(field) + 1:].strip().replace("\"", "")
raise KeyError("Failed to find field '%s' in output" % field)
def __format_filesystem(self):
if self.skipformat:
msger.debug("Skip filesystem format.")
return
msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
rc = runner.show([self.mkfscmd, "-L", self.fslabel, self.disk.device])
if rc != 0:
raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
self.uuid = self.__parse_field(runner.outs([self.blkidcmd, self.disk.device]), "UUID")
def __resize_filesystem(self, size = None):
current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
if size is None:
size = self.disk.size
if size == current_size:
return
if size > current_size:
self.disk.expand(size)
self.__fsck()
return size
def __create(self):
resize = False
if not self.disk.fixed() and self.disk.exists():
resize = True
self.disk.create()
if resize:
self.__resize_filesystem()
else:
self.__format_filesystem()
def mount(self, options = None):
self.__create()
DiskMount.mount(self, options)
def __fsck(self):
msger.debug("Checking filesystem %s" % self.disk.lofile)
runner.quiet([self.btrfsckcmd, self.disk.lofile])
def __get_size_from_filesystem(self):
return self.disk.size
def __resize_to_minimal(self):
self.__fsck()
return self.__get_size_from_filesystem()
def resparse(self, size = None):
self.cleanup()
minsize = self.__resize_to_minimal()
self.disk.truncate(minsize)
self.__resize_filesystem(size)
return minsize
class DeviceMapperSnapshot(object):
def __init__(self, imgloop, cowloop):
self.imgloop = imgloop
self.cowloop = cowloop
self.__created = False
self.__name = None
self.dmsetupcmd = find_binary_path("dmsetup")
"""Load dm_snapshot if it isn't loaded"""
load_module("dm_snapshot")
def get_path(self):
if self.__name is None:
return None
return os.path.join("/dev/mapper", self.__name)
path = property(get_path)
def create(self):
if self.__created:
return
self.imgloop.create()
self.cowloop.create()
self.__name = "imgcreate-%d-%d" % (os.getpid(),
random.randint(0, 2**16))
size = os.stat(self.imgloop.lofile)[stat.ST_SIZE]
table = "0 %d snapshot %s %s p 8" % (size / 512,
self.imgloop.device,
self.cowloop.device)
args = [self.dmsetupcmd, "create", self.__name, "--table", table]
if runner.show(args) != 0:
self.cowloop.cleanup()
self.imgloop.cleanup()
raise SnapshotError("Could not create snapshot device using: " + ' '.join(args))
self.__created = True
def remove(self, ignore_errors = False):
if not self.__created:
return
time.sleep(2)
rc = runner.show([self.dmsetupcmd, "remove", self.__name])
if not ignore_errors and rc != 0:
raise SnapshotError("Could not remove snapshot device")
self.__name = None
self.__created = False
self.cowloop.cleanup()
self.imgloop.cleanup()
def get_cow_used(self):
if not self.__created:
return 0
#
# dmsetup status on a snapshot returns e.g.
# "0 8388608 snapshot 416/1048576"
# or, more generally:
# "A B snapshot C/D"
# where C is the number of 512 byte sectors in use
#
out = runner.outs([self.dmsetupcmd, "status", self.__name])
try:
return int((out.split()[3]).split('/')[0]) * 512
except ValueError:
raise SnapshotError("Failed to parse dmsetup status: " + out)
def create_image_minimizer(path, image, minimal_size):
"""
Builds a copy-on-write image which can be used to
create a device-mapper snapshot of an image where
the image's filesystem is as small as possible
The steps taken are:
1) Create a sparse COW
2) Loopback mount the image and the COW
3) Create a device-mapper snapshot of the image
using the COW
4) Resize the filesystem to the minimal size
5) Determine the amount of space used in the COW
6) Destroy the device-mapper snapshot
7) Truncate the COW, removing unused space
8) Create a squashfs of the COW
"""
imgloop = LoopbackDisk(image, None) # Passing bogus size - doesn't matter
cowloop = SparseLoopbackDisk(os.path.join(os.path.dirname(path), "osmin"),
64L * 1024L * 1024L)
snapshot = DeviceMapperSnapshot(imgloop, cowloop)
try:
snapshot.create()
resize2fs(snapshot.path, minimal_size)
cow_used = snapshot.get_cow_used()
finally:
snapshot.remove(ignore_errors = (not sys.exc_info()[0] is None))
cowloop.truncate(cow_used)
mksquashfs(cowloop.lofile, path)
os.unlink(cowloop.lofile)
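# Illustrative sketch (not part of the original code): how the steps above
# fit together for a caller. The paths and the 8 MB target size are
# hypothetical example values.
#
#   create_image_minimizer("/tmp/build/osmin.img",    # squashed COW output
#                          "/tmp/build/ext3fs.img",   # existing fs image
#                          8 * 1024L * 1024L)         # minimal fs size (bytes)
#
# Afterwards /tmp/build/osmin.img contains only the blocks resize2fs touched
# while shrinking the filesystem, compressed with mksquashfs.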
def load_module(module):
found = False
for line in open('/proc/modules').xreadlines():
if line.startswith("%s " % module):
found = True
break
if not found:
msger.info("Loading %s..." % module)
runner.quiet(['modprobe', module])
class LoopDevice(object):
def __init__(self, loopid=None):
self.device = None
self.loopid = loopid
self.created = False
self.kpartxcmd = find_binary_path("kpartx")
self.losetupcmd = find_binary_path("losetup")
def register(self, device):
self.device = device
self.loopid = None
self.created = True
def reg_atexit(self):
import atexit
atexit.register(self.close)
def _genloopid(self):
import glob
if not glob.glob("/dev/loop[0-9]*"):
return 10
fint = lambda x: x[9:].isdigit() and int(x[9:]) or 0
maxid = 1 + max(filter(lambda x: x<100,
map(fint, glob.glob("/dev/loop[0-9]*"))))
if maxid < 10: maxid = 10
if maxid >= 100: raise MountError("Too many loop devices allocated (id >= 100)")
return maxid
def _kpseek(self, device):
rc, out = runner.runtool([self.kpartxcmd, '-l', '-v', device])
if rc != 0:
raise MountError("Can't query dm snapshot on %s" % device)
for line in out.splitlines():
if line and line.startswith("loop"):
return True
return False
def _loseek(self, device):
import re
rc, out = runner.runtool([self.losetupcmd, '-a'])
if rc != 0:
raise MountError("Failed to run 'losetup -a'")
for line in out.splitlines():
m = re.match("([^:]+): .*", line)
if m and m.group(1) == device:
return True
return False
def create(self):
if not self.created:
if not self.loopid:
self.loopid = self._genloopid()
self.device = "/dev/loop%d" % self.loopid
if os.path.exists(self.device):
if self._loseek(self.device):
raise MountError("Device busy: %s" % self.device)
else:
self.created = True
return
mknod = find_binary_path('mknod')
rc = runner.show([mknod, '-m664', self.device, 'b', '7', str(self.loopid)])
if rc != 0:
raise MountError("Failed to create device %s" % self.device)
else:
self.created = True
def close(self):
if self.created:
try:
self.cleanup()
self.device = None
except MountError, e:
msger.error("%s" % e)
def cleanup(self):
if self.device is None:
return
if self._kpseek(self.device):
if self.created:
for i in range(3, os.sysconf("SC_OPEN_MAX")):
try:
os.close(i)
except:
pass
runner.quiet([self.kpartxcmd, "-d", self.device])
if self._loseek(self.device):
runner.quiet([self.losetupcmd, "-d", self.device])
# FIXME: should sleep a while between the two loseek calls
if self._loseek(self.device):
msger.warning("Can't cleanup loop device %s" % self.device)
elif self.loopid:
os.unlink(self.device)
DEVICE_PIDFILE_DIR = "/var/tmp/mic/device"
DEVICE_LOCKFILE = "/var/lock/__mic_loopdev.lock"
def get_loop_device(losetupcmd, lofile):
global DEVICE_PIDFILE_DIR
global DEVICE_LOCKFILE
import fcntl
makedirs(os.path.dirname(DEVICE_LOCKFILE))
fp = open(DEVICE_LOCKFILE, 'w')
fcntl.flock(fp, fcntl.LOCK_EX)
try:
loopdev = None
devinst = LoopDevice()
# clean up leftover loop devices first
clean_loop_devices()
# provide an available loop device
rc, out = runner.runtool([losetupcmd, "--find"])
if rc == 0:
loopdev = out.split()[0]
devinst.register(loopdev)
if not loopdev or not os.path.exists(loopdev):
devinst.create()
loopdev = devinst.device
# setup a loop device for image file
rc = runner.show([losetupcmd, loopdev, lofile])
if rc != 0:
raise MountError("Failed to setup loop device for '%s'" % lofile)
devinst.reg_atexit()
# try to save device and pid
makedirs(DEVICE_PIDFILE_DIR)
pidfile = os.path.join(DEVICE_PIDFILE_DIR, os.path.basename(loopdev))
if os.path.exists(pidfile):
os.unlink(pidfile)
with open(pidfile, 'w') as wf:
wf.write(str(os.getpid()))
except MountError, err:
raise CreatorError("%s" % str(err))
except:
raise
finally:
try:
fcntl.flock(fp, fcntl.LOCK_UN)
fp.close()
os.unlink(DEVICE_LOCKFILE)
except:
pass
return loopdev
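# Illustrative sketch (assumed usage, not from the original source): a caller
# pairs get_loop_device() with a losetup binary located via find_binary_path().
# The image path is hypothetical.
#
#   losetupcmd = find_binary_path("losetup")
#   loopdev = get_loop_device(losetupcmd, "/tmp/build/ext3fs.img")
#   # loopdev is now e.g. "/dev/loop10", attached to the image file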
def clean_loop_devices(piddir=DEVICE_PIDFILE_DIR):
if not os.path.exists(piddir) or not os.path.isdir(piddir):
return
for loopdev in os.listdir(piddir):
pidfile = os.path.join(piddir, loopdev)
try:
with open(pidfile, 'r') as rf:
devpid = int(rf.read())
except:
devpid = None
# if the process using this device is alive, skip it
if not devpid or os.path.exists(os.path.join('/proc', str(devpid))):
continue
# try to clean it up
try:
devinst = LoopDevice()
devinst.register(os.path.join('/dev', loopdev))
devinst.cleanup()
os.unlink(pidfile)
except:
pass
########NEW FILE########
__FILENAME__ = gpt_parser
#!/usr/bin/python -tt
#
# Copyright (c) 2013 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" This module implements a simple GPT partitions parser which can read the
GPT header and the GPT partition table. """
import struct
import uuid
import binascii
from mic.utils.errors import MountError
_GPT_HEADER_FORMAT = "<8s4sIIIQQQQ16sQIII"
_GPT_HEADER_SIZE = struct.calcsize(_GPT_HEADER_FORMAT)
_GPT_ENTRY_FORMAT = "<16s16sQQQ72s"
_GPT_ENTRY_SIZE = struct.calcsize(_GPT_ENTRY_FORMAT)
_SUPPORTED_GPT_REVISION = '\x00\x00\x01\x00'
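# For reference, _GPT_HEADER_FORMAT unpacks into these fields (little-endian),
# matching the raw_hdr indexes used below:
#   [0] 8s  signature        [1] 4s  revision        [2] I header size
#   [3] I   header CRC32     [4] I   reserved        [5] Q current LBA
#   [6] Q   backup LBA       [7] Q   first usable    [8] Q last usable LBA
#   [9] 16s disk GUID       [10] Q   ptable LBA     [11] I entries count
#  [12] I   entry size      [13] I   ptable CRC32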
def _stringify_uuid(binary_uuid):
""" A small helper function to transform a binary UUID into a string
format. """
uuid_str = str(uuid.UUID(bytes_le = binary_uuid))
return uuid_str.upper()
def _calc_header_crc(raw_hdr):
""" Calculate GPT header CRC32 checksum. The 'raw_hdr' parameter has to
be a list or a tuple containing all the elements of the GPT header in a
"raw" form, meaning that it should simply contain "unpacked" disk data.
"""
raw_hdr = list(raw_hdr)
raw_hdr[3] = 0
raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
return binascii.crc32(raw_hdr) & 0xFFFFFFFF
def _validate_header(raw_hdr):
""" Validate the GPT header. The 'raw_hdr' parameter has to be a list or a
tuple containing all the elements of the GPT header in a "raw" form,
meaning that it should simply contain "unpacked" disk data. """
# Validate the signature
if raw_hdr[0] != 'EFI PART':
raise MountError("GPT partition table not found")
# Validate the revision
if raw_hdr[1] != _SUPPORTED_GPT_REVISION:
raise MountError("Unsupported GPT revision '%s', supported revision " \
"is '%s'" % \
(binascii.hexlify(raw_hdr[1]),
binascii.hexlify(_SUPPORTED_GPT_REVISION)))
# Validate header size
if raw_hdr[2] != _GPT_HEADER_SIZE:
raise MountError("Bad GPT header size: %d bytes, expected %d" % \
(raw_hdr[2], _GPT_HEADER_SIZE))
crc = _calc_header_crc(raw_hdr)
if raw_hdr[3] != crc:
raise MountError("GPT header crc mismatch: %#x, should be %#x" % \
(crc, raw_hdr[3]))
class GptParser:
""" GPT partition table parser. Allows reading the GPT header and the
partition table, as well as modifying the partition table records. """
def __init__(self, disk_path, sector_size = 512):
""" The class constructor which accepts the following parameters:
* disk_path - full path to the disk image or device node
* sector_size - size of a disk sector in bytes """
self.sector_size = sector_size
self.disk_path = disk_path
try:
self._disk_obj = open(disk_path, 'r+b')
except IOError as err:
raise MountError("Cannot open file '%s' for reading GPT " \
"partitions: %s" % (disk_path, err))
def __del__(self):
""" The class destructor. """
self._disk_obj.close()
def _read_disk(self, offset, size):
""" A helper function which reads 'size' bytes from offset 'offset' of
the disk and checks all the error conditions. """
self._disk_obj.seek(offset)
try:
data = self._disk_obj.read(size)
except IOError as err:
raise MountError("cannot read from '%s': %s" % \
(self.disk_path, err))
if len(data) != size:
raise MountError("cannot read %d bytes from offset '%d' of '%s', " \
"read only %d bytes" % \
(size, offset, self.disk_path, len(data)))
return data
def _write_disk(self, offset, buf):
""" A helper function which writes buffer 'buf' to offset 'offset' of
the disk. This function takes care of unaligned writes and checks all
the error conditions. """
# Since we may be dealing with a block device, we can only write in
# 'self.sector_size' chunks. Find the aligned starting and ending
# disk offsets to read.
start = (offset / self.sector_size) * self.sector_size
end = ((start + len(buf)) / self.sector_size + 1) * self.sector_size
data = self._read_disk(start, end - start)
off = offset - start
data = data[:off] + buf + data[off + len(buf):]
self._disk_obj.seek(start)
try:
self._disk_obj.write(data)
except IOError as err:
raise MountError("cannot write to '%s': %s" % (self.disk_path, err))
def read_header(self, primary = True):
""" Read and verify the GPT header and return a dictionary containing
the following elements:
'signature' : header signature
'revision' : header revision
'hdr_size' : header size in bytes
'hdr_crc' : header CRC32
'hdr_lba' : LBA of this header
'hdr_offs' : byte disk offset of this header
'backup_lba' : backup header LBA
'backup_offs' : byte disk offset of backup header
'first_lba' : first usable LBA for partitions
'first_offs' : first usable byte disk offset for partitions
'last_lba' : last usable LBA for partitions
'last_offs' : last usable byte disk offset for partitions
'disk_uuid' : UUID of the disk
'ptable_lba' : starting LBA of array of partition entries
'ptable_offs' : disk byte offset of the start of the partition table
'ptable_size' : partition table size in bytes
'entries_cnt' : number of available partition table entries
'entry_size' : size of a single partition entry
'ptable_crc' : CRC32 of the partition table
'primary' : a boolean, if 'True', this is the primary GPT header,
if 'False' - the secondary
'primary_str' : contains string "primary" if this is the primary GPT
header, and "backup" otherwise
This dictionary corresponds to the GPT header format. Please, see the
UEFI standard for the description of these fields.
If the 'primary' parameter is 'True', the primary GPT header is read,
otherwise the backup GPT header is read instead. """
# Read and validate the primary GPT header
raw_hdr = self._read_disk(self.sector_size, _GPT_HEADER_SIZE)
raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
_validate_header(raw_hdr)
primary_str = "primary"
if not primary:
# Read and validate the backup GPT header
raw_hdr = self._read_disk(raw_hdr[6] * self.sector_size, _GPT_HEADER_SIZE)
raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
_validate_header(raw_hdr)
primary_str = "backup"
return { 'signature' : raw_hdr[0],
'revision' : raw_hdr[1],
'hdr_size' : raw_hdr[2],
'hdr_crc' : raw_hdr[3],
'hdr_lba' : raw_hdr[5],
'hdr_offs' : raw_hdr[5] * self.sector_size,
'backup_lba' : raw_hdr[6],
'backup_offs' : raw_hdr[6] * self.sector_size,
'first_lba' : raw_hdr[7],
'first_offs' : raw_hdr[7] * self.sector_size,
'last_lba' : raw_hdr[8],
'last_offs' : raw_hdr[8] * self.sector_size,
'disk_uuid' :_stringify_uuid(raw_hdr[9]),
'ptable_lba' : raw_hdr[10],
'ptable_offs' : raw_hdr[10] * self.sector_size,
'ptable_size' : raw_hdr[11] * raw_hdr[12],
'entries_cnt' : raw_hdr[11],
'entry_size' : raw_hdr[12],
'ptable_crc' : raw_hdr[13],
'primary' : primary,
'primary_str' : primary_str }
def _read_raw_ptable(self, header):
""" Read and validate primary or backup partition table. The 'header'
argument is the GPT header. If it is the primary GPT header, then the
primary partition table is read and validated, otherwise - the backup
one. The 'header' argument is a dictionary which is returned by the
'read_header()' method. """
raw_ptable = self._read_disk(header['ptable_offs'],
header['ptable_size'])
crc = binascii.crc32(raw_ptable) & 0xFFFFFFFF
if crc != header['ptable_crc']:
raise MountError("Partition table at LBA %d (%s) is corrupted" % \
(header['ptable_lba'], header['primary_str']))
return raw_ptable
def get_partitions(self, primary = True):
""" This is a generator which parses the GPT partition table and
generates the following dictionary for each partition:
'index' : the index of the partition table entry
'offs' : byte disk offset of the partition table entry
'type_uuid' : partition type UUID
'part_uuid' : partition UUID
'first_lba' : the first LBA
'last_lba' : the last LBA
'flags' : attribute flags
'name' : partition name
'primary' : a boolean, if 'True', this is the primary partition
table, if 'False' - the secondary
'primary_str' : contains string "primary" if this is the primary GPT
header, and "backup" otherwise
This dictionary corresponds to the GPT header format. Please, see the
UEFI standard for the description of these fields.
If the 'primary' parameter is 'True', partitions from the primary GPT
partition table are generated, otherwise partitions from the backup GPT
partition table are generated. """
if primary:
primary_str = "primary"
else:
primary_str = "backup"
header = self.read_header(primary)
raw_ptable = self._read_raw_ptable(header)
for index in xrange(0, header['entries_cnt']):
start = header['entry_size'] * index
end = start + header['entry_size']
raw_entry = struct.unpack(_GPT_ENTRY_FORMAT, raw_ptable[start:end])
if raw_entry[2] == 0 or raw_entry[3] == 0:
continue
part_name = str(raw_entry[5].decode('UTF-16').split('\0', 1)[0])
yield { 'index' : index,
'offs' : header['ptable_offs'] + start,
'type_uuid' : _stringify_uuid(raw_entry[0]),
'part_uuid' : _stringify_uuid(raw_entry[1]),
'first_lba' : raw_entry[2],
'last_lba' : raw_entry[3],
'flags' : raw_entry[4],
'name' : part_name,
'primary' : primary,
'primary_str' : primary_str }
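# Illustrative sketch (comment only; the image path is hypothetical):
#
#   parser = GptParser("/tmp/build/disk.img", sector_size = 512)
#   for part in parser.get_partitions():
#       print "%d: %s LBA %d..%d" % (part['index'], part['part_uuid'],
#                                    part['first_lba'], part['last_lba'])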
def _change_partition(self, header, entry):
""" A helper function for 'change_partitions()' which changes a
a paricular instance of the partition table (primary or backup). """
if entry['index'] >= header['entries_cnt']:
raise MountError("Partition table at LBA %d has only %d " \
"records cannot change record number %d" % \
(header['entries_cnt'], entry['index']))
# Read raw GPT header
raw_hdr = self._read_disk(header['hdr_offs'], _GPT_HEADER_SIZE)
raw_hdr = list(struct.unpack(_GPT_HEADER_FORMAT, raw_hdr))
_validate_header(raw_hdr)
# Prepare the new partition table entry
raw_entry = struct.pack(_GPT_ENTRY_FORMAT,
uuid.UUID(entry['type_uuid']).bytes_le,
uuid.UUID(entry['part_uuid']).bytes_le,
entry['first_lba'],
entry['last_lba'],
entry['flags'],
entry['name'].encode('UTF-16'))
# Write the updated entry to the disk
entry_offs = header['ptable_offs'] + \
header['entry_size'] * entry['index']
self._write_disk(entry_offs, raw_entry)
# Calculate and update partition table CRC32
raw_ptable = self._read_disk(header['ptable_offs'],
header['ptable_size'])
raw_hdr[13] = binascii.crc32(raw_ptable) & 0xFFFFFFFF
# Calculate and update the GPT header CRC
raw_hdr[3] = _calc_header_crc(raw_hdr)
# Write the updated header to the disk
raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
self._write_disk(header['hdr_offs'], raw_hdr)
def change_partition(self, entry):
""" Change a GPT partition. The 'entry' argument has the same format as
'get_partitions()' returns. This function simply changes the partition
table record corresponding to 'entry' in both, the primary and the
backup GPT partition tables. The partition table CRC is re-calculated
and the GPT headers are modified accordingly. """
# Change the primary partition table
header = self.read_header(True)
self._change_partition(header, entry)
# Change the backup partition table
header = self.read_header(False)
self._change_partition(header, entry)
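# Illustrative sketch (comment only): regenerating every partition UUID with
# the methods above; 'parser' is a hypothetical GptParser instance.
#
#   for part in parser.get_partitions():
#       part['part_uuid'] = str(uuid.uuid4()).upper()
#       parser.change_partition(part)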
########NEW FILE########
__FILENAME__ = grabber
#!/usr/bin/python
import os
import sys
import rpm
import fcntl
import struct
import termios
from mic import msger
from mic.utils import runner
from mic.utils.errors import CreatorError
from urlgrabber import grabber
from urlgrabber import __version__ as grabber_version
if rpm.labelCompare(grabber_version.split('.'), '3.9.0'.split('.')) == -1:
msger.warning("Version of python-urlgrabber is %s, lower than '3.9.0', "
"you may encounter some network issues" % grabber_version)
def myurlgrab(url, filename, proxies, progress_obj = None):
g = grabber.URLGrabber()
if progress_obj is None:
progress_obj = TextProgress()
if url.startswith("file:/"):
filepath = "/%s" % url.replace("file:", "").lstrip('/')
if not os.path.exists(filepath):
raise CreatorError("URLGrabber error: can't find file %s" % url)
if url.endswith('.rpm'):
return filepath
else:
# leave repo metadata in the source path untouched; just copy the file
runner.show(['cp', '-f', filepath, filename])
else:
try:
filename = g.urlgrab(url=str(url),
filename=filename,
ssl_verify_host=False,
ssl_verify_peer=False,
proxies=proxies,
http_headers=(('Pragma', 'no-cache'),),
quote=0,
progress_obj=progress_obj)
except grabber.URLGrabError, err:
msg = str(err)
if msg.find(url) < 0:
msg += ' on %s' % url
raise CreatorError(msg)
return filename
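# Illustrative sketch (comment only; URL, cache path and proxy map are
# hypothetical):
#
#   rpmpath = myurlgrab("http://repo.example.com/foo-1.0.rpm",
#                       "/var/tmp/cache/foo-1.0.rpm",
#                       {"http": "http://proxy.example.com:8080"})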
def terminal_width(fd=1):
""" Get the real terminal width """
try:
buf = 'abcdefgh'
buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf)
return struct.unpack('hhhh', buf)[1]
except: # IOError
return 80
def truncate_url(url, width):
return os.path.basename(url)[0:width]
class TextProgress(object):
# make the class a singleton
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(TextProgress, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, totalnum = None):
self.total = totalnum
self.counter = 1
def start(self, filename, url, *args, **kwargs):
self.url = url
self.termwidth = terminal_width()
msger.info("\r%-*s" % (self.termwidth, " "))
if self.total is None:
msger.info("\rRetrieving %s ..." % truncate_url(self.url, self.termwidth - 15))
else:
msger.info("\rRetrieving %s [%d/%d] ..." % (truncate_url(self.url, self.termwidth - 25), self.counter, self.total))
def update(self, *args):
pass
def end(self, *args):
if self.counter == self.total:
msger.raw("\n")
if self.total is not None:
self.counter += 1
########NEW FILE########
__FILENAME__ = misc
#!/usr/bin/python -tt
#
# Copyright (c) 2010, 2011 Intel Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import sys
import time
import tempfile
import re
import shutil
import glob
import hashlib
import subprocess
import platform
import traceback
try:
import sqlite3 as sqlite
except ImportError:
import sqlite
try:
from xml.etree import cElementTree
except ImportError:
import cElementTree
xmlparse = cElementTree.parse
from mic import msger
from mic.utils.errors import CreatorError, SquashfsError
from mic.utils.fs_related import find_binary_path, makedirs
from mic.utils.proxy import get_proxy_for
from mic.utils import runner
# the two names below are used later in this module; module paths are assumed
from mic.utils import rpmmisc
from mic.utils.grabber import myurlgrab
RPM_RE = re.compile("(.*)\.(.*) (.*)-(.*)")
RPM_FMT = "%(name)s.%(arch)s %(version)s-%(release)s"
SRPM_RE = re.compile("(.*)-(\d+.*)-(\d+\.\d+).src.rpm")
def build_name(kscfg, release=None, prefix = None, suffix = None):
"""Construct and return an image name string.
This is a utility function to help create sensible name and fslabel
strings. The name is constructed using the sans-prefix-and-extension
kickstart filename and the supplied prefix and suffix.
kscfg -- a path to a kickstart file
release -- a replacement to suffix for image release
prefix -- a prefix to prepend to the name; defaults to None, which causes
no prefix to be used
suffix -- a suffix to append to the name; defaults to None, which causes
a YYYYMMDDHHMM suffix to be used
"""
name = os.path.basename(kscfg)
idx = name.rfind('.')
if idx >= 0:
name = name[:idx]
if release is not None:
suffix = ""
if prefix is None:
prefix = ""
if suffix is None:
suffix = time.strftime("%Y%m%d%H%M")
if name.startswith(prefix):
name = name[len(prefix):]
prefix = "%s-" % prefix if prefix else ""
suffix = "-%s" % suffix if suffix else ""
ret = prefix + name + suffix
return ret
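# Illustrative example values (comment only):
#
#   build_name("/path/handset.ks", prefix = "tizen")
#   # -> "tizen-handset-201401151200" (timestamp suffix varies)
#   build_name("/path/handset.ks", release = "2.0")
#   # -> "handset" (an explicit release suppresses the timestamp suffix)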
def get_distro():
"""Detect linux distribution, support "meego"
"""
support_dists = ('SuSE',
'debian',
'fedora',
'redhat',
'centos',
'meego',
'moblin',
'tizen')
try:
(dist, ver, id) = platform.linux_distribution( \
supported_dists = support_dists)
except:
(dist, ver, id) = platform.dist( \
supported_dists = support_dists)
return (dist, ver, id)
def get_distro_str():
"""Get composited string for current linux distribution
"""
(dist, ver, id) = get_distro()
if not dist:
return 'Unknown Linux Distro'
else:
distro_str = ' '.join(map(str.strip, (dist, ver, id)))
return distro_str.strip()
_LOOP_RULE_PTH = None
def hide_loopdev_presentation():
udev_rules = "80-prevent-loop-present.rules"
udev_rules_dir = [
'/usr/lib/udev/rules.d/',
'/lib/udev/rules.d/',
'/etc/udev/rules.d/'
]
global _LOOP_RULE_PTH
for rdir in udev_rules_dir:
if os.path.exists(rdir):
_LOOP_RULE_PTH = os.path.join(rdir, udev_rules)
if not _LOOP_RULE_PTH:
return
try:
with open(_LOOP_RULE_PTH, 'w') as wf:
wf.write('KERNEL=="loop*", ENV{UDISKS_PRESENTATION_HIDE}="1"')
runner.quiet('udevadm trigger')
except:
pass
def unhide_loopdev_presentation():
global _LOOP_RULE_PTH
if not _LOOP_RULE_PTH:
return
try:
os.unlink(_LOOP_RULE_PTH)
runner.quiet('udevadm trigger')
except:
pass
def extract_rpm(rpmfile, targetdir):
rpm2cpio = find_binary_path("rpm2cpio")
cpio = find_binary_path("cpio")
olddir = os.getcwd()
os.chdir(targetdir)
msger.verbose("Extract rpm file with cpio: %s" % rpmfile)
p1 = subprocess.Popen([rpm2cpio, rpmfile], stdout=subprocess.PIPE)
p2 = subprocess.Popen([cpio, "-idv"], stdin=p1.stdout,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(sout, serr) = p2.communicate()
msger.verbose(sout or serr)
os.chdir(olddir)
def compressing(fpath, method):
comp_map = {
"gz": "gzip",
"bz2": "bzip2"
}
if method not in comp_map:
raise CreatorError("Unsupport compress format: %s, valid values: %s"
% (method, ','.join(comp_map.keys())))
cmd = find_binary_path(comp_map[method])
rc = runner.show([cmd, "-f", fpath])
if rc:
raise CreatorError("Failed to %s file: %s" % (comp_map[method], fpath))
def taring(dstfile, target):
import tarfile
basen, ext = os.path.splitext(dstfile)
comp = {".tar": None,
".gz": "gz", # for .tar.gz
".bz2": "bz2", # for .tar.bz2
".tgz": "gz",
".tbz": "bz2"}[ext]
# specify tarball file path
if not comp:
tarpath = dstfile
elif basen.endswith(".tar"):
tarpath = basen
else:
tarpath = basen + ".tar"
wf = tarfile.open(tarpath, 'w')
if os.path.isdir(target):
for item in os.listdir(target):
wf.add(os.path.join(target, item), item)
else:
wf.add(target, os.path.basename(target))
wf.close()
if comp:
compressing(tarpath, comp)
# when dstfile ext is ".tgz" and ".tbz", should rename
if not basen.endswith(".tar"):
shutil.move("%s.%s" % (tarpath, comp), dstfile)
def ziping(dstfile, target):
import zipfile
wf = zipfile.ZipFile(dstfile, 'w', compression=zipfile.ZIP_DEFLATED)
if os.path.isdir(target):
for item in os.listdir(target):
fpath = os.path.join(target, item)
if not os.path.isfile(fpath):
continue
wf.write(fpath, item, zipfile.ZIP_DEFLATED)
else:
wf.write(target, os.path.basename(target), zipfile.ZIP_DEFLATED)
wf.close()
pack_formats = {
".tar": taring,
".tar.gz": taring,
".tar.bz2": taring,
".tgz": taring,
".tbz": taring,
".zip": ziping,
}
def packing(dstfile, target):
(base, ext) = os.path.splitext(dstfile)
if ext in (".gz", ".bz2") and base.endswith(".tar"):
ext = ".tar" + ext
if ext not in pack_formats:
raise CreatorError("Unsupport pack format: %s, valid values: %s"
% (ext, ','.join(pack_formats.keys())))
func = pack_formats[ext]
# func should be callable
func(dstfile, target)
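# Illustrative sketch (comment only; paths are hypothetical):
#
#   packing("/tmp/out/rootfs.tar.gz", "/tmp/build/rootfs")  # tar, then gzip
#   packing("/tmp/out/rootfs.zip",    "/tmp/build/rootfs")  # zip archive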
def human_size(size):
"""Return human readable string for Bytes size
"""
if size <= 0:
return "0M"
import math
measure = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
expo = int(math.log(size, 1024))
mant = float(size/math.pow(1024, expo))
return "{0:.1f}{1:s}".format(mant, measure[expo])
def get_block_size(file_obj):
""" Returns block size for file object 'file_obj'. Errors are indicated by
the 'IOError' exception. """
from fcntl import ioctl
import struct
# Get the block size of the host file-system for the image file by calling
# the FIGETBSZ ioctl (number 2).
binary_data = ioctl(file_obj, 2, struct.pack('I', 0))
return struct.unpack('I', binary_data)[0]
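# Illustrative sketch (comment only; the image path is hypothetical):
#
#   with open("/tmp/build/disk.img") as img:
#       bsize = get_block_size(img)   # e.g. 4096 on a typical ext4 host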
def check_space_pre_cp(src, dst):
"""Check whether disk space is enough before 'cp' like
operations, else exception will be raised.
"""
srcsize = get_file_size(src) * 1024 * 1024
freesize = get_filesystem_avail(dst)
if srcsize > freesize:
raise CreatorError("space on %s(%s) is not enough for about %s files"
% (dst, human_size(freesize), human_size(srcsize)))
def calc_hashes(file_path, hash_names, start = 0, end = None):
""" Calculate hashes for a file. The 'file_path' argument is the file
to calculate hash functions for, 'start' and 'end' are the starting and
ending file offsets to calculate the hash functions for. The 'hash_names'
argument is a list of hash names to calculate. Returns the list
of calculated hash values in the hexadecimal form in the same order
as 'hash_names'.
"""
if end == None:
end = os.path.getsize(file_path)
chunk_size = 65536
to_read = end - start
read = 0
hashes = []
for hash_name in hash_names:
hashes.append(hashlib.new(hash_name))
with open(file_path, "rb") as f:
f.seek(start)
while read < to_read:
if read + chunk_size > to_read:
chunk_size = to_read - read
chunk = f.read(chunk_size)
for hash_obj in hashes:
hash_obj.update(chunk)
read += chunk_size
result = []
for hash_obj in hashes:
result.append(hash_obj.hexdigest())
return result
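# Illustrative sketch (comment only; the file path is hypothetical):
#
#   md5, sha256 = calc_hashes("/tmp/build/disk.img", ("md5", "sha256"))
#   # or hash only the first megabyte:
#   md5_1m = calc_hashes("/tmp/build/disk.img", ("md5",),
#                        start = 0, end = 1024 * 1024)[0]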
def get_md5sum(fpath):
return calc_hashes(fpath, ('md5', ))[0]
def normalize_ksfile(ksconf, release, arch):
'''
Return the name of a normalized ks file in which macro variables
@BUILD_ID@ and @ARCH@ are replaced with real values.
The original ks file is returned if no special macro is used, otherwise
a temp file is created and returned, which will be deleted when program
exits normally.
'''
if not release:
release = "latest"
if not arch or re.match(r'i.86', arch):
arch = "ia32"
with open(ksconf) as f:
ksc = f.read()
if "@ARCH@" not in ksc and "@BUILD_ID@" not in ksc:
return ksconf
msger.info("Substitute macro variable @BUILD_ID@/@ARCH@ in ks: %s" % ksconf)
ksc = ksc.replace("@ARCH@", arch)
ksc = ksc.replace("@BUILD_ID@", release)
fd, ksconf = tempfile.mkstemp(prefix=os.path.basename(ksconf))
os.write(fd, ksc)
os.close(fd)
msger.debug('normalized ks file:%s' % ksconf)
def remove_temp_ks():
try:
os.unlink(ksconf)
except OSError, err:
msger.warning('Failed to remove temp ks file:%s:%s' % (ksconf, err))
import atexit
atexit.register(remove_temp_ks)
return ksconf
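# Illustrative sketch (comment only): for a ks file whose content contains
# @ARCH@ or @BUILD_ID@, a temp copy with the values substituted is returned
# and removed automatically at exit; the path and values are hypothetical.
#
#   ksconf = normalize_ksfile("/path/image.ks", "tizen-2.0", "armv7l")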
def _check_mic_chroot(rootdir):
def _path(path):
return rootdir.rstrip('/') + path
release_files = map(_path, [ "/etc/moblin-release",
"/etc/meego-release",
"/etc/tizen-release"])
if not any(map(os.path.exists, release_files)):
msger.warning("Dir %s is not a MeeGo/Tizen chroot env" % rootdir)
if not glob.glob(rootdir + "/boot/vmlinuz-*"):
msger.warning("Failed to find kernel module under %s" % rootdir)
return
def selinux_check(arch, fstypes):
try:
getenforce = find_binary_path('getenforce')
except CreatorError:
return
selinux_status = runner.outs([getenforce])
if arch and arch.startswith("arm") and selinux_status == "Enforcing":
raise CreatorError("Can't create arm image if selinux is enabled, "
"please run 'setenforce 0' to disable selinux")
use_btrfs = filter(lambda typ: typ == 'btrfs', fstypes)
if use_btrfs and selinux_status == "Enforcing":
raise CreatorError("Can't create btrfs image if selinux is enabled,"
" please run 'setenforce 0' to disable selinux")
def get_image_type(path):
def _get_extension_name(path):
match = re.search("(?<=\.)\w+$", path)
if match:
return match.group(0)
else:
return None
if os.path.isdir(path):
_check_mic_chroot(path)
return "fs"
maptab = {
"tar": "loop",
"raw":"raw",
"vmdk":"vmdk",
"vdi":"vdi",
"iso":"livecd",
"usbimg":"liveusb",
}
extension = _get_extension_name(path)
if extension in maptab:
return maptab[extension]
fd = open(path, "rb")
file_header = fd.read(1024)
fd.close()
vdi_flag = "<<< Sun VirtualBox Disk Image >>>"
if file_header[0:len(vdi_flag)] == vdi_flag:
return maptab["vdi"]
output = runner.outs(['file', path])
isoptn = re.compile(r".*ISO 9660 CD-ROM filesystem.*(bootable).*")
usbimgptn = re.compile(r".*x86 boot sector.*active.*")
rawptn = re.compile(r".*x86 boot sector.*")
vmdkptn = re.compile(r".*VMware. disk image.*")
ext3fsimgptn = re.compile(r".*Linux.*ext3 filesystem data.*")
ext4fsimgptn = re.compile(r".*Linux.*ext4 filesystem data.*")
btrfsimgptn = re.compile(r".*BTRFS.*")
if isoptn.match(output):
return maptab["iso"]
elif usbimgptn.match(output):
return maptab["usbimg"]
elif rawptn.match(output):
return maptab["raw"]
elif vmdkptn.match(output):
return maptab["vmdk"]
elif ext3fsimgptn.match(output):
return "ext3fsimg"
elif ext4fsimgptn.match(output):
return "ext4fsimg"
elif btrfsimgptn.match(output):
return "btrfsimg"
else:
raise CreatorError("Cannot detect the type of image: %s" % path)
def get_file_size(filename):
""" Return size in MB unit """
cmd = ['du', "-s", "-b", "-B", "1M", filename]
rc, duOutput = runner.runtool(cmd)
if rc != 0:
raise CreatorError("Failed to run: %s" % ' '.join(cmd))
size1 = int(duOutput.split()[0])
cmd = ['du', "-s", "-B", "1M", filename]
rc, duOutput = runner.runtool(cmd)
if rc != 0:
raise CreatorError("Failed to run: %s" % ' '.join(cmd))
size2 = int(duOutput.split()[0])
return max(size1, size2)
def get_filesystem_avail(fs):
vfstat = os.statvfs(fs)
return vfstat.f_bavail * vfstat.f_bsize
def convert_image(srcimg, srcfmt, dstimg, dstfmt):
#convert disk format
if dstfmt != "raw":
raise CreatorError("Invalid destination image format: %s" % dstfmt)
msger.debug("converting %s image to %s" % (srcimg, dstimg))
if srcfmt == "vmdk":
path = find_binary_path("qemu-img")
argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt, dstimg]
elif srcfmt == "vdi":
path = find_binary_path("VBoxManage")
argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
else:
raise CreatorError("Invalid soure image format: %s" % srcfmt)
rc = runner.show(argv)
if rc == 0:
msger.debug("convert successful")
if rc != 0:
raise CreatorError("Unable to convert disk to %s" % dstfmt)
def uncompress_squashfs(squashfsimg, outdir):
"""Uncompress file system from squshfs image"""
unsquashfs = find_binary_path("unsquashfs")
args = [ unsquashfs, "-d", outdir, squashfsimg ]
rc = runner.show(args)
if (rc != 0):
raise SquashfsError("Failed to uncompress %s." % squashfsimg)
def mkdtemp(dir = "/var/tmp", prefix = "wic-tmp-"):
""" FIXME: use the dir in wic.conf instead """
makedirs(dir)
return tempfile.mkdtemp(dir = dir, prefix = prefix)
def get_repostrs_from_ks(ks):
def _get_temp_reponame(baseurl):
md5obj = hashlib.md5(baseurl)
tmpreponame = "%s" % md5obj.hexdigest()
return tmpreponame
kickstart_repos = []
for repodata in ks.handler.repo.repoList:
repo = {}
for attr in ('name',
'baseurl',
'mirrorlist',
'includepkgs', # val is list
'excludepkgs', # val is list
'cost', # int
'priority',# int
'save',
'proxy',
'proxyuser',
'proxypasswd',
'proxypasswd',
'debuginfo',
'source',
'gpgkey',
'ssl_verify'):
if hasattr(repodata, attr) and getattr(repodata, attr):
repo[attr] = getattr(repodata, attr)
if 'name' not in repo:
repo['name'] = _get_temp_reponame(repodata.baseurl)
kickstart_repos.append(repo)
return kickstart_repos
def _get_uncompressed_data_from_url(url, filename, proxies):
filename = myurlgrab(url, filename, proxies)
suffix = None
if filename.endswith(".gz"):
suffix = ".gz"
runner.quiet(['gunzip', "-f", filename])
elif filename.endswith(".bz2"):
suffix = ".bz2"
runner.quiet(['bunzip2', "-f", filename])
if suffix:
filename = filename.replace(suffix, "")
return filename
def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
sumtype=None, checksum=None):
url = os.path.join(baseurl, filename)
filename_tmp = str("%s/%s/%s" % (cachedir, reponame, os.path.basename(filename)))
if os.path.splitext(filename_tmp)[1] in (".gz", ".bz2"):
filename = os.path.splitext(filename_tmp)[0]
else:
filename = filename_tmp
if sumtype and checksum and os.path.exists(filename):
try:
sumcmd = find_binary_path("%ssum" % sumtype)
except:
file_checksum = None
else:
file_checksum = runner.outs([sumcmd, filename]).split()[0]
if file_checksum and file_checksum == checksum:
return filename
return _get_uncompressed_data_from_url(url,filename_tmp,proxies)
def get_metadata_from_repos(repos, cachedir):
my_repo_metadata = []
for repo in repos:
reponame = repo['name']
baseurl = repo['baseurl']
if 'proxy' in repo:
proxy = repo['proxy']
else:
proxy = get_proxy_for(baseurl)
proxies = None
if proxy:
proxies = {str(baseurl.split(":")[0]):str(proxy)}
makedirs(os.path.join(cachedir, reponame))
url = os.path.join(baseurl, "repodata/repomd.xml")
filename = os.path.join(cachedir, reponame, 'repomd.xml')
repomd = myurlgrab(url, filename, proxies)
try:
root = xmlparse(repomd)
except SyntaxError:
raise CreatorError("repomd.xml syntax error.")
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
filepaths = {}
checksums = {}
sumtypes = {}
for elm in root.getiterator("%sdata" % ns):
if elm.attrib["type"] == "patterns":
filepaths['patterns'] = elm.find("%slocation" % ns).attrib['href']
checksums['patterns'] = elm.find("%sopen-checksum" % ns).text
sumtypes['patterns'] = elm.find("%sopen-checksum" % ns).attrib['type']
break
for elm in root.getiterator("%sdata" % ns):
if elm.attrib["type"] in ("group_gz", "group"):
filepaths['comps'] = elm.find("%slocation" % ns).attrib['href']
checksums['comps'] = elm.find("%sopen-checksum" % ns).text
sumtypes['comps'] = elm.find("%sopen-checksum" % ns).attrib['type']
break
primary_type = None
for elm in root.getiterator("%sdata" % ns):
if elm.attrib["type"] in ("primary_db", "primary"):
primary_type = elm.attrib["type"]
filepaths['primary'] = elm.find("%slocation" % ns).attrib['href']
checksums['primary'] = elm.find("%sopen-checksum" % ns).text
sumtypes['primary'] = elm.find("%sopen-checksum" % ns).attrib['type']
break
if not primary_type:
continue
for item in ("primary", "patterns", "comps"):
if item not in filepaths:
filepaths[item] = None
continue
if not filepaths[item]:
continue
filepaths[item] = _get_metadata_from_repo(baseurl,
proxies,
cachedir,
reponame,
filepaths[item],
sumtypes[item],
checksums[item])
""" Get repo key """
try:
repokey = _get_metadata_from_repo(baseurl,
proxies,
cachedir,
reponame,
"repodata/repomd.xml.key")
except CreatorError:
repokey = None
msger.debug("\ncan't get %s/%s" % (baseurl, "repodata/repomd.xml.key"))
my_repo_metadata.append({"name":reponame,
"baseurl":baseurl,
"repomd":repomd,
"primary":filepaths['primary'],
"cachedir":cachedir,
"proxies":proxies,
"patterns":filepaths['patterns'],
"comps":filepaths['comps'],
"repokey":repokey})
return my_repo_metadata
def get_rpmver_in_repo(repometadata):
for repo in repometadata:
if repo["primary"].endswith(".xml"):
root = xmlparse(repo["primary"])
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
versionlist = []
for elm in root.getiterator("%spackage" % ns):
if elm.find("%sname" % ns).text == 'rpm':
for node in elm.getchildren():
if node.tag == "%sversion" % ns:
versionlist.append(node.attrib['ver'])
if versionlist:
return reversed(
sorted(
versionlist,
key = lambda ver: map(int, ver.split('.')))).next()
elif repo["primary"].endswith(".sqlite"):
con = sqlite.connect(repo["primary"])
for row in con.execute("select version from packages where "
"name=\"rpm\" ORDER by version DESC"):
con.close()
return row[0]
return None
def get_arch(repometadata):
archlist = []
for repo in repometadata:
if repo["primary"].endswith(".xml"):
root = xmlparse(repo["primary"])
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
for elm in root.getiterator("%spackage" % ns):
if elm.find("%sarch" % ns).text not in ("noarch", "src"):
arch = elm.find("%sarch" % ns).text
if arch not in archlist:
archlist.append(arch)
elif repo["primary"].endswith(".sqlite"):
con = sqlite.connect(repo["primary"])
for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"):
if row[0] not in archlist:
archlist.append(row[0])
con.close()
uniq_arch = []
for i in range(len(archlist)):
if archlist[i] not in rpmmisc.archPolicies.keys():
continue
need_append = True
j = 0
while j < len(uniq_arch):
if archlist[i] in rpmmisc.archPolicies[uniq_arch[j]].split(':'):
need_append = False
break
if uniq_arch[j] in rpmmisc.archPolicies[archlist[i]].split(':'):
if need_append:
uniq_arch[j] = archlist[i]
need_append = False
else:
uniq_arch.remove(uniq_arch[j])
continue
j += 1
if need_append:
uniq_arch.append(archlist[i])
return uniq_arch, archlist
def get_package(pkg, repometadata, arch = None):
ver = ""
target_repo = None
if not arch:
arches = []
elif arch not in rpmmisc.archPolicies:
arches = [arch]
else:
arches = rpmmisc.archPolicies[arch].split(':')
arches.append('noarch')
for repo in repometadata:
if repo["primary"].endswith(".xml"):
root = xmlparse(repo["primary"])
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
for elm in root.getiterator("%spackage" % ns):
if elm.find("%sname" % ns).text == pkg:
if elm.find("%sarch" % ns).text in arches:
version = elm.find("%sversion" % ns)
tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
if tmpver > ver:
ver = tmpver
location = elm.find("%slocation" % ns)
pkgpath = "%s" % location.attrib['href']
target_repo = repo
break
if repo["primary"].endswith(".sqlite"):
con = sqlite.connect(repo["primary"])
if arch:
sql = 'select version, release, location_href from packages ' \
'where name = "%s" and arch IN ("%s")' % \
(pkg, '","'.join(arches))
for row in con.execute(sql):
tmpver = "%s-%s" % (row[0], row[1])
if tmpver > ver:
ver = tmpver
pkgpath = "%s" % row[2]
target_repo = repo
break
else:
sql = 'select version, release, location_href from packages ' \
'where name = "%s"' % pkg
for row in con.execute(sql):
tmpver = "%s-%s" % (row[0], row[1])
if tmpver > ver:
ver = tmpver
pkgpath = "%s" % row[2]
target_repo = repo
break
con.close()
if target_repo:
makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
url = os.path.join(target_repo["baseurl"], pkgpath)
filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
if os.path.exists(filename):
ret = rpmmisc.checkRpmIntegrity('rpm', filename)
if ret == 0:
return filename
msger.warning("package %s is damaged: %s" %
(os.path.basename(filename), filename))
os.unlink(filename)
pkg = myurlgrab(str(url), filename, target_repo["proxies"])
return pkg
else:
return None
def get_source_name(pkg, repometadata):
def get_bin_name(pkg):
m = RPM_RE.match(pkg)
if m:
return m.group(1)
return None
def get_src_name(srpm):
m = SRPM_RE.match(srpm)
if m:
return m.group(1)
return None
ver = ""
target_repo = None
pkg_name = get_bin_name(pkg)
if not pkg_name:
return None
for repo in repometadata:
if repo["primary"].endswith(".xml"):
root = xmlparse(repo["primary"])
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
for elm in root.getiterator("%spackage" % ns):
if elm.find("%sname" % ns).text == pkg_name:
if elm.find("%sarch" % ns).text != "src":
version = elm.find("%sversion" % ns)
tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
if tmpver > ver:
ver = tmpver
fmt = elm.find("%sformat" % ns)
if fmt:
fns = fmt.getchildren()[0].tag
fns = fns[0:fns.rindex("}")+1]
pkgpath = fmt.find("%ssourcerpm" % fns).text
target_repo = repo
break
if repo["primary"].endswith(".sqlite"):
con = sqlite.connect(repo["primary"])
for row in con.execute("select version, release, rpm_sourcerpm from packages where name = \"%s\" and arch != \"src\"" % pkg_name):
tmpver = "%s-%s" % (row[0], row[1])
if tmpver > ver:
pkgpath = "%s" % row[2]
target_repo = repo
break
con.close()
if target_repo:
return get_src_name(pkgpath)
else:
return None
def get_pkglist_in_patterns(group, patterns):
found = False
pkglist = []
try:
root = xmlparse(patterns)
except SyntaxError:
raise SyntaxError("%s syntax error." % patterns)
for elm in list(root.getroot()):
ns = elm.tag
ns = ns[0:ns.rindex("}")+1]
name = elm.find("%sname" % ns)
summary = elm.find("%ssummary" % ns)
if name.text == group or summary.text == group:
found = True
break
if not found:
return pkglist
found = False
for requires in list(elm):
if requires.tag.endswith("requires"):
found = True
break
if not found:
return pkglist
for pkg in list(requires):
pkgname = pkg.attrib["name"]
if pkgname not in pkglist:
pkglist.append(pkgname)
return pkglist
def get_pkglist_in_comps(group, comps):
found = False
pkglist = []
try:
root = xmlparse(comps)
except SyntaxError:
raise SyntaxError("%s syntax error." % comps)
for elm in root.getiterator("group"):
id = elm.find("id")
name = elm.find("name")
if id.text == group or name.text == group:
packagelist = elm.find("packagelist")
found = True
break
if not found:
return pkglist
for require in elm.getiterator("packagereq"):
if require.tag.endswith("packagereq"):
pkgname = require.text
if pkgname not in pkglist:
pkglist.append(pkgname)
return pkglist
def is_statically_linked(binary):
return ", statically linked, " in runner.outs(['file', binary])
def setup_qemu_emulator(rootdir, arch):
# mount binfmt_misc if it doesn't exist
if not os.path.exists("/proc/sys/fs/binfmt_misc"):
modprobecmd = find_binary_path("modprobe")
runner.show([modprobecmd, "binfmt_misc"])
if not os.path.exists("/proc/sys/fs/binfmt_misc/register"):
mountcmd = find_binary_path("mount")
runner.show([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"])
# qemu_emulator is a special case, we can't use find_binary_path
# qemu emulator should be a statically-linked executable file
qemu_emulator = "/usr/bin/qemu-arm"
if not os.path.exists(qemu_emulator) or not is_statically_linked(qemu_emulator):
qemu_emulator = "/usr/bin/qemu-arm-static"
if not os.path.exists(qemu_emulator):
raise CreatorError("Please install a statically-linked qemu-arm")
# qemu emulator version check
armv7_list = [arch for arch in rpmmisc.archPolicies.keys() if arch.startswith('armv7')]
if arch in armv7_list: # need qemu (>=0.13.0)
qemuout = runner.outs([qemu_emulator, "-h"])
m = re.search("version\s*([.\d]+)", qemuout)
if m:
qemu_version = m.group(1)
if qemu_version < "0.13":
raise CreatorError("Requires %s version >=0.13 for %s" % (qemu_emulator, arch))
else:
msger.warning("Can't get version info of %s, please make sure it's higher than 0.13.0" % qemu_emulator)
if not os.path.exists(rootdir + "/usr/bin"):
makedirs(rootdir + "/usr/bin")
shutil.copy(qemu_emulator, rootdir + "/usr/bin/qemu-arm-static")
qemu_emulator = "/usr/bin/qemu-arm-static"
# disable selinux, selinux will block qemu emulator to run
if os.path.exists("/usr/sbin/setenforce"):
msger.info('Try to disable selinux')
runner.show(["/usr/sbin/setenforce", "0"])
# unregister it if it has been registered and is a dynamically-linked executable
node = "/proc/sys/fs/binfmt_misc/arm"
if os.path.exists(node):
qemu_unregister_string = "-1\n"
fd = open("/proc/sys/fs/binfmt_misc/arm", "w")
fd.write(qemu_unregister_string)
fd.close()
# register qemu emulator for interpreting other arch executable file
if not os.path.exists(node):
qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator
fd = open("/proc/sys/fs/binfmt_misc/register", "w")
fd.write(qemu_arm_string)
fd.close()
return qemu_emulator
def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir):
def get_source_repometadata(repometadata):
src_repometadata=[]
for repo in repometadata:
if repo["name"].endswith("-source"):
src_repometadata.append(repo)
if src_repometadata:
return src_repometadata
return None
def get_src_name(srpm):
m = SRPM_RE.match(srpm)
if m:
return m.group(1)
return None
src_repometadata = get_source_repometadata(repometadata)
if not src_repometadata:
msger.warning("No source repo found")
return None
src_pkgs = []
lpkgs_dict = {}
lpkgs_path = []
for repo in src_repometadata:
cachepath = "%s/%s/packages/*.src.rpm" %(cachedir, repo["name"])
lpkgs_path += glob.glob(cachepath)
for lpkg in lpkgs_path:
lpkg_name = get_src_name(os.path.basename(lpkg))
lpkgs_dict[lpkg_name] = lpkg
localpkgs = lpkgs_dict.keys()
cached_count = 0
destdir = instroot+'/usr/src/SRPMS'
if not os.path.exists(destdir):
os.makedirs(destdir)
srcpkgset = set()
for _pkg in pkgs:
srcpkg_name = get_source_name(_pkg, repometadata)
if not srcpkg_name:
continue
srcpkgset.add(srcpkg_name)
for pkg in list(srcpkgset):
if pkg in localpkgs:
cached_count += 1
shutil.copy(lpkgs_dict[pkg], destdir)
src_pkgs.append(os.path.basename(lpkgs_dict[pkg]))
else:
src_pkg = get_package(pkg, src_repometadata, 'src')
if src_pkg:
shutil.copy(src_pkg, destdir)
src_pkgs.append(src_pkg)
msger.info("%d source packages gotten from cache" % cached_count)
return src_pkgs
def strip_end(text, suffix):
if not text.endswith(suffix):
return text
return text[:-len(suffix)]
########NEW FILE########
__FILENAME__ = misc
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This module provides a place to collect various mic-related utils
# for the OpenEmbedded Image Tools.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
from mic import msger
from mic.utils import runner
def exec_cmd(cmd_and_args, as_shell = False, catch = 3):
"""
Execute command, catching stderr, stdout
Need to execute as_shell if the command uses wildcards
"""
msger.debug("exec_cmd: %s" % cmd_and_args)
args = cmd_and_args.split()
msger.debug(args)
if (as_shell):
rc, out = runner.runtool(cmd_and_args, catch)
else:
rc, out = runner.runtool(args, catch)
out = out.strip()
msger.debug("exec_cmd: output for %s (rc = %d): %s" % \
(cmd_and_args, rc, out))
if rc != 0:
# We don't throw exception when return code is not 0, because
# parted always fails to reload part table with loop devices. This
# prevents us from distinguishing real errors based on return
# code.
msger.warning("WARNING: %s returned '%s' instead of 0" % (cmd_and_args, rc))
return (rc, out)
def exec_cmd_quiet(cmd_and_args, as_shell = False):
"""
Execute command, catching nothing in the output
Need to execute as_shell if the command uses wildcards
"""
return exec_cmd(cmd_and_args, as_shell, 0)
def exec_native_cmd(cmd_and_args, native_sysroot, catch = 3):
"""
Execute native command, catching stderr, stdout
Need to execute as_shell if the command uses wildcards
Always need to execute native commands as_shell
"""
native_paths = \
"export PATH=%s/sbin:%s/usr/sbin:%s/usr/bin:$PATH" % \
(native_sysroot, native_sysroot, native_sysroot)
native_cmd_and_args = "%s;%s" % (native_paths, cmd_and_args)
msger.debug("exec_native_cmd: %s" % cmd_and_args)
args = cmd_and_args.split()
msger.debug(args)
rc, out = exec_cmd(native_cmd_and_args, True, catch)
if rc == 127: # shell command-not-found
msger.error("A native (host) program required to build the image "
"was not found (see details above). Please make sure "
"it's installed and try again.")
return (rc, out)
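# Illustrative sketch (comment only; the sysroot path is hypothetical):
#
#   rc, out = exec_native_cmd("parted -s /tmp/build/disk.img print",
#                             "/opt/poky/sysroots/x86_64-pokysdk-linux")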
def exec_native_cmd_quiet(cmd_and_args, native_sysroot):
"""
Execute native command, catching nothing in the output
Need to execute as_shell if the command uses wildcards
Always need to execute native commands as_shell
"""
return exec_native_cmd(cmd_and_args, native_sysroot, 0)
# kickstart doesn't support variable substitution in commands, so this
# is our current simplistic scheme for supporting that
wks_vars = dict()
def get_wks_var(key):
return wks_vars[key]
def add_wks_var(key, val):
wks_vars[key] = val
BOOTDD_EXTRA_SPACE = 16384
IMAGE_EXTRA_SPACE = 10240
__bitbake_env_lines = ""
def set_bitbake_env_lines(bitbake_env_lines):
global __bitbake_env_lines
__bitbake_env_lines = bitbake_env_lines
def get_bitbake_env_lines():
return __bitbake_env_lines
def find_bitbake_env_lines(image_name):
"""
If image_name is empty, plugins might still be able to use the
environment, so set it regardless.
"""
if image_name:
bitbake_env_cmd = "bitbake -e %s" % image_name
else:
bitbake_env_cmd = "bitbake -e"
rc, bitbake_env_lines = exec_cmd(bitbake_env_cmd)
if rc != 0:
print "Couldn't get '%s' output." % bitbake_env_cmd
return None
return bitbake_env_lines
def find_artifact(bitbake_env_lines, variable):
"""
Gather the build artifact for the current image (the image_name
e.g. core-image-minimal) for the current MACHINE set in local.conf
"""
retval = ""
for line in bitbake_env_lines.split('\n'):
if (get_line_val(line, variable)):
retval = get_line_val(line, variable)
break
return retval
def get_line_val(line, key):
"""
Extract the value from the VAR="val" string
"""
if line.startswith(key + "="):
stripped_line = line.split('=')[1]
stripped_line = stripped_line.replace('\"', '')
return stripped_line
return None
def get_bitbake_var(key):
for line in __bitbake_env_lines.split('\n'):
if (get_line_val(line, key)):
val = get_line_val(line, key)
return val
return None
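# Illustrative sketch (comment only): the typical flow for the bitbake
# helpers above; the image and variable names are examples.
#
#   env = find_bitbake_env_lines("core-image-minimal")
#   set_bitbake_env_lines(env)
#   deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")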
########NEW FILE########
__FILENAME__ = partitionedfs
#!/usr/bin/python -tt
#
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
# Copyright (c) 2007, 2008 Red Hat, Inc.
# Copyright (c) 2008 Daniel P. Berrange
# Copyright (c) 2008 David P. Huff
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
from mic import msger
from mic.utils import runner
from mic.utils.errors import MountError
from mic.utils.fs_related import *
from mic.utils.gpt_parser import GptParser
from mic.utils.oe.misc import *
# Overhead of the MBR partitioning scheme (just one sector)
MBR_OVERHEAD = 1
# Overhead of the GPT partitioning scheme
GPT_OVERHEAD = 34
# Size of a sector in bytes
SECTOR_SIZE = 512
class PartitionedMount(Mount):
def __init__(self, mountdir, skipformat = False):
Mount.__init__(self, mountdir)
self.disks = {}
self.partitions = []
self.subvolumes = []
self.mapped = False
self.mountOrder = []
self.unmountOrder = []
self.parted = find_binary_path("parted")
self.btrfscmd=None
self.skipformat = skipformat
self.snapshot_created = self.skipformat
# Size of a sector used in calculations
self.sector_size = SECTOR_SIZE
self._partitions_layed_out = False
def __add_disk(self, disk_name):
""" Add a disk 'disk_name' to the internal list of disks. Note,
'disk_name' is the name of the disk in the target system
(e.g., sdb). """
if disk_name in self.disks:
# We already have this disk
return
assert not self._partitions_layed_out
self.disks[disk_name] = \
{ 'disk': None, # Disk object
'mapped': False, # True if kpartx mapping exists
'numpart': 0, # Number of allocated partitions
'partitions': [], # Indexes to self.partitions
'offset': 0, # Offset of next partition (in sectors)
# Minimum required disk size to fit all partitions (in bytes)
'min_size': 0,
'ptable_format': "msdos" } # Partition table format
def add_disk(self, disk_name, disk_obj):
""" Add a disk object which have to be partitioned. More than one disk
can be added. In case of multiple disks, disk partitions have to be
added for each disk separately with 'add_partition()". """
self.__add_disk(disk_name)
self.disks[disk_name]['disk'] = disk_obj
def __add_partition(self, part):
""" This is a helper function for 'add_partition()' which adds a
partition to the internal list of partitions. """
assert not self._partitions_layed_out
self.partitions.append(part)
self.__add_disk(part['disk_name'])
def add_partition(self, size, disk_name, mountpoint, source_file = None, fstype = None,
label=None, fsopts = None, boot = False, align = None,
part_type = None):
""" Add the next partition. Prtitions have to be added in the
first-to-last order. """
ks_pnum = len(self.partitions)
# Converting MB to sectors for parted
size = size * 1024 * 1024 / self.sector_size
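# e.g. a 100 MB request with 512-byte sectors becomes
# 100 * 1024 * 1024 / 512 = 204800 sectors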
# We need to handle subvolumes for btrfs
if fstype == "btrfs" and fsopts and fsopts.find("subvol=") != -1:
self.btrfscmd=find_binary_path("btrfs")
subvol = None
opts = fsopts.split(",")
for opt in opts:
if opt.find("subvol=") != -1:
subvol = opt.replace("subvol=", "").strip()
break
if not subvol:
raise MountError("No subvolume: %s" % fsopts)
self.subvolumes.append({'size': size, # In sectors
'mountpoint': mountpoint, # Mount relative to chroot
'fstype': fstype, # Filesystem type
'fsopts': fsopts, # Filesystem mount options
'disk_name': disk_name, # physical disk name holding partition
'device': None, # kpartx device node for partition
'mount': None, # Mount object
'subvol': subvol, # Subvolume name
'boot': boot, # Bootable flag
'mounted': False # Mount flag
})
# We still need partition for "/" or non-subvolume
if mountpoint == "/" or not fsopts or fsopts.find("subvol=") == -1:
# Don't need subvolume for "/" because it will be set as default subvolume
if fsopts and fsopts.find("subvol=") != -1:
opts = fsopts.split(",")
for opt in opts:
if opt.strip().startswith("subvol="):
opts.remove(opt)
break
fsopts = ",".join(opts)
part = { 'ks_pnum' : ks_pnum, # Partition number in the KS file
'size': size, # In sectors
'mountpoint': mountpoint, # Mount relative to chroot
'source_file': source_file, # partition contents
'fstype': fstype, # Filesystem type
'fsopts': fsopts, # Filesystem mount options
'label': label, # Partition label
'disk_name': disk_name, # physical disk name holding partition
'device': None, # kpartx device node for partition
'mount': None, # Mount object
'num': None, # Partition number
'boot': boot, # Bootable flag
'align': align, # Partition alignment
'part_type' : part_type, # Partition type
'partuuid': None } # Partition UUID (GPT-only)
self.__add_partition(part)
def layout_partitions(self, ptable_format = "msdos"):
""" Layout the partitions, meaning calculate the position of every
partition on the disk. The 'ptable_format' parameter defines the
partition table format, and may be either "msdos" or "gpt". """
msger.debug("Assigning %s partitions to disks" % ptable_format)
if ptable_format not in ('msdos', 'gpt'):
raise MountError("Unknown partition table format '%s', supported " \
"formats are: 'msdos' and 'gpt'" % ptable_format)
if self._partitions_layed_out:
return
self._partitions_layed_out = True
# Go through partitions in the order they are added in .ks file
for n in range(len(self.partitions)):
p = self.partitions[n]
if not self.disks.has_key(p['disk_name']):
raise MountError("No disk %s for partition %s" \
% (p['disk_name'], p['mountpoint']))
if p['part_type'] and ptable_format != 'gpt':
# The --part-type option could also be implemented for MBR
# partitions, in which case it would map to the 1-byte
# "partition type" field of the partition entry.
raise MountError("setting custom partition type is only " \
"implemented for GPT partitions")
# Get the disk where the partition is located
d = self.disks[p['disk_name']]
d['numpart'] += 1
d['ptable_format'] = ptable_format
if d['numpart'] == 1:
if ptable_format == "msdos":
overhead = MBR_OVERHEAD
else:
overhead = GPT_OVERHEAD
# Skip one sector required for the partitioning scheme overhead
d['offset'] += overhead
# Steal few sectors from the first partition to offset for the
# partitioning overhead
p['size'] -= overhead
if p['align']:
# If alignment is set, we need to align the partition start.
# FIXME: This leaves empty space on the disk. To fill the gaps we
# could enlarge the previous partition.
# Calc how much the alignment is off.
align_sectors = d['offset'] % (p['align'] * 1024 / self.sector_size)
# We need to move forward to the next alignment point
align_sectors = (p['align'] * 1024 / self.sector_size) - align_sectors
msger.debug("Realignment for %s%s with %s sectors, original"
" offset %s, target alignment is %sK." %
(p['disk_name'], d['numpart'], align_sectors,
d['offset'], p['align']))
# increase the offset so we actually start the partition on right alignment
d['offset'] += align_sectors
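# Worked example (hypothetical numbers): with align = 1024 (KiB) and
# 512-byte sectors the alignment unit is 2048 sectors; an offset of
# 2083 sectors is 35 sectors past the boundary, so align_sectors is
# 2048 - 35 = 2013 and the partition starts at sector 4096.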
p['start'] = d['offset']
d['offset'] += p['size']
p['type'] = 'primary'
p['num'] = d['numpart']
if d['ptable_format'] == "msdos":
if d['numpart'] > 2:
# Every logical partition requires an additional sector for
# the EBR, so steal the last sector from the end of each
# partition starting from the 3rd one for the EBR. This
# will make sure the logical partitions are aligned
# correctly.
p['size'] -= 1
if d['numpart'] > 3:
p['type'] = 'logical'
p['num'] = d['numpart'] + 1
d['partitions'].append(n)
msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
"sectors (%d bytes)." \
% (p['mountpoint'], p['disk_name'], p['num'],
p['start'], p['start'] + p['size'] - 1,
p['size'], p['size'] * self.sector_size))
# Once all the partitions have been laid out, we can calculate the
# minimum disk sizes.
for disk_name, d in self.disks.items():
d['min_size'] = d['offset']
if d['ptable_format'] == 'gpt':
# Account for the backup partition table at the end of the disk
d['min_size'] += GPT_OVERHEAD
d['min_size'] *= self.sector_size
def __run_parted(self, args):
""" Run parted with arguments specified in the 'args' list. """
args.insert(0, self.parted)
msger.debug(args)
rc, out = runner.runtool(args, catch = 3)
out = out.strip()
if out:
msger.debug('"parted" output: %s' % out)
if rc != 0:
# We don't throw exception when return code is not 0, because
# parted always fails to reload part table with loop devices. This
# prevents us from distinguishing real errors based on return
# code.
msger.debug("WARNING: parted returned '%s' instead of 0" % rc)
def __create_partition(self, device, parttype, fstype, start, size):
""" Create a partition on an image described by the 'device' object. """
# Start is included in the size, so we need to subtract one from the end.
end = start + size - 1
msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" %
(parttype, start, end, size))
args = ["-s", device, "unit", "s", "mkpart", parttype]
if fstype:
args.extend([fstype])
args.extend(["%d" % start, "%d" % end])
return self.__run_parted(args)
def __format_disks(self):
self.layout_partitions()
if self.skipformat:
msger.debug("Skipping disk format, because skipformat flag is set.")
return
for dev in self.disks.keys():
d = self.disks[dev]
msger.debug("Initializing partition table for %s" % \
(d['disk'].device))
self.__run_parted(["-s", d['disk'].device, "mklabel",
d['ptable_format']])
msger.debug("Creating partitions")
for p in self.partitions:
d = self.disks[p['disk_name']]
if d['ptable_format'] == "msdos" and p['num'] == 5:
# The last sector of the 3rd partition was reserved for the EBR
# of the first _logical_ partition. This is why the extended
# partition should start one sector before the first logical
# partition.
self.__create_partition(d['disk'].device, "extended",
None, p['start'] - 1,
d['offset'] - p['start'])
if p['fstype'] == "swap":
parted_fs_type = "linux-swap"
elif p['fstype'] == "vfat":
parted_fs_type = "fat32"
elif p['fstype'] == "msdos":
parted_fs_type = "fat16"
else:
# Type for ext2/ext3/ext4/btrfs
parted_fs_type = "ext2"
# The boot ROM of OMAP boards requires the vfat boot partition to
# have an even number of sectors.
if p['mountpoint'] == "/boot" and p['fstype'] in ["vfat", "msdos"] \
and p['size'] % 2:
msger.debug("Substracting one sector from '%s' partition to " \
"get even number of sectors for the partition" % \
p['mountpoint'])
p['size'] -= 1
self.__create_partition(d['disk'].device, p['type'],
parted_fs_type, p['start'], p['size'])
if p['boot']:
if d['ptable_format'] == 'gpt':
flag_name = "legacy_boot"
else:
flag_name = "boot"
msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \
(flag_name, p['num'], d['disk'].device))
self.__run_parted(["-s", d['disk'].device, "set",
"%d" % p['num'], flag_name, "on"])
# Parted defaults to enabling the lba flag for fat16 partitions,
# which causes compatibility issues with some firmware (and really
# isn't necessary).
if parted_fs_type == "fat16":
if d['ptable_format'] == 'msdos':
msger.debug("Disable 'lba' flag for partition '%s' on disk '%s'" % \
(p['num'], d['disk'].device))
self.__run_parted(["-s", d['disk'].device, "set",
"%d" % p['num'], "lba", "off"])
# If the partition table format is "gpt", find out PARTUUIDs for all
# the partitions. And if users specified custom parition type UUIDs,
# set them.
for disk_name, disk in self.disks.items():
if disk['ptable_format'] != 'gpt':
continue
pnum = 0
gpt_parser = GptParser(d['disk'].device, SECTOR_SIZE)
# Iterate over all GPT partitions on this disk
for entry in gpt_parser.get_partitions():
pnum += 1
# Find the matching partition in the 'self.partitions' list
for n in d['partitions']:
p = self.partitions[n]
if p['num'] == pnum:
# Found, fetch PARTUUID (partition's unique ID)
p['partuuid'] = entry['part_uuid']
msger.debug("PARTUUID for partition %d on disk '%s' " \
"(mount point '%s') is '%s'" % (pnum, \
disk_name, p['mountpoint'], p['partuuid']))
if p['part_type']:
entry['type_uuid'] = p['part_type']
msger.debug("Change type of partition %d on disk " \
"'%s' (mount point '%s') to '%s'" % \
(pnum, disk_name, p['mountpoint'],
p['part_type']))
gpt_parser.change_partition(entry)
del gpt_parser
def __map_partitions(self):
"""Load it if dm_snapshot isn't loaded. """
load_module("dm_snapshot")
for dev in self.disks.keys():
d = self.disks[dev]
if d['mapped']:
continue
msger.debug("Running kpartx on %s" % d['disk'].device )
rc, kpartxOutput = runner.runtool([self.kpartx, "-l", "-v", d['disk'].device])
kpartxOutput = kpartxOutput.splitlines()
if rc != 0:
raise MountError("Failed to query partition mapping for '%s'" %
d['disk'].device)
# Skip verbose output lines until the first "loop" device mapping
i = 0
while i < len(kpartxOutput) and kpartxOutput[i][0:4] != "loop":
i = i + 1
kpartxOutput = kpartxOutput[i:]
# Make sure kpartx reported the right count of partitions
if len(kpartxOutput) != d['numpart']:
# If this disk has more than 3 partitions, then in case of MBR
# partitions there is an extended partition. Different versions
# of kpartx behave differently WRT the extended partition -
# some map it, some ignore it. This is why we do the below hack
# - if kpartx reported one more partition and the partition
# table type is "msdos" and the amount of partitions is more
# than 3, we just assume kpartx mapped the extended partition
# and we remove it.
if len(kpartxOutput) == d['numpart'] + 1 \
and d['ptable_format'] == 'msdos' and len(kpartxOutput) > 3:
kpartxOutput.pop(3)
else:
raise MountError("Unexpected number of partitions from " \
"kpartx: %d != %d" % \
(len(kpartxOutput), d['numpart']))
for i in range(len(kpartxOutput)):
line = kpartxOutput[i]
newdev = line.split()[0]
mapperdev = "/dev/mapper/" + newdev
loopdev = d['disk'].device + newdev[-1]
msger.debug("Dev %s: %s -> %s" % (newdev, loopdev, mapperdev))
pnum = d['partitions'][i]
self.partitions[pnum]['device'] = loopdev
# grub's install wants partitions to be named
# to match their parent device + partition num
# kpartx doesn't work like this, so we add compat
# symlinks to point to /dev/mapper
if os.path.lexists(loopdev):
os.unlink(loopdev)
os.symlink(mapperdev, loopdev)
msger.debug("Adding partx mapping for %s" % d['disk'].device)
rc = runner.show([self.kpartx, "-v", "-a", d['disk'].device])
if rc != 0:
# Make sure that the device maps are also removed on error case.
# The d['mapped'] isn't set to True if the kpartx fails so
# failed mapping will not be cleaned on cleanup either.
runner.quiet([self.kpartx, "-d", d['disk'].device])
raise MountError("Failed to map partitions for '%s'" %
d['disk'].device)
# FIXME: there is a short delay before the multipath device setup
# completes; give it time to settle
import time
time.sleep(10)
d['mapped'] = True
def __unmap_partitions(self):
for dev in self.disks.keys():
d = self.disks[dev]
if not d['mapped']:
continue
msger.debug("Removing compat symlinks")
for pnum in d['partitions']:
if self.partitions[pnum]['device'] != None:
os.unlink(self.partitions[pnum]['device'])
self.partitions[pnum]['device'] = None
msger.debug("Unmapping %s" % d['disk'].device)
rc = runner.quiet([self.kpartx, "-d", d['disk'].device])
if rc != 0:
raise MountError("Failed to unmap partitions for '%s'" %
d['disk'].device)
d['mapped'] = False
def __calculate_mountorder(self):
msger.debug("Calculating mount order")
for p in self.partitions:
if p['mountpoint']:
self.mountOrder.append(p['mountpoint'])
self.unmountOrder.append(p['mountpoint'])
self.mountOrder.sort()
self.unmountOrder.sort()
self.unmountOrder.reverse()
def cleanup(self):
Mount.cleanup(self)
if self.disks:
self.__unmap_partitions()
for dev in self.disks.keys():
d = self.disks[dev]
try:
d['disk'].cleanup()
except:
pass
def unmount(self):
self.__unmount_subvolumes()
for mp in self.unmountOrder:
if mp == 'swap':
continue
p = None
for p1 in self.partitions:
if p1['mountpoint'] == mp:
p = p1
break
if p['mount'] != None:
try:
# Create subvolume snapshot here
if p['fstype'] == "btrfs" and p['mountpoint'] == "/" and not self.snapshot_created:
self.__create_subvolume_snapshots(p, p["mount"])
p['mount'].cleanup()
except:
pass
p['mount'] = None
# Only for btrfs
def __get_subvolume_id(self, rootpath, subvol):
if not self.btrfscmd:
self.btrfscmd=find_binary_path("btrfs")
argv = [ self.btrfscmd, "subvolume", "list", rootpath ]
rc, out = runner.runtool(argv)
msger.debug(out)
if rc != 0:
raise MountError("Failed to get subvolume id from %s', return code: %d." % (rootpath, rc))
subvolid = -1
for line in out.splitlines():
if line.endswith(" path %s" % subvol):
subvolid = line.split()[1]
if not subvolid.isdigit():
raise MountError("Invalid subvolume id: %s" % subvolid)
subvolid = int(subvolid)
break
return subvolid
def __create_subvolume_metadata(self, p, pdisk):
if len(self.subvolumes) == 0:
return
argv = [ self.btrfscmd, "subvolume", "list", pdisk.mountdir ]
rc, out = runner.runtool(argv)
msger.debug(out)
if rc != 0:
raise MountError("Failed to get subvolume id from %s', return code: %d." % (pdisk.mountdir, rc))
subvolid_items = out.splitlines()
subvolume_metadata = ""
for subvol in self.subvolumes:
for line in subvolid_items:
if line.endswith(" path %s" % subvol["subvol"]):
subvolid = line.split()[1]
if not subvolid.isdigit():
raise MountError("Invalid subvolume id: %s" % subvolid)
subvolid = int(subvolid)
opts = subvol["fsopts"].split(",")
for opt in opts:
if opt.strip().startswith("subvol="):
opts.remove(opt)
break
fsopts = ",".join(opts)
subvolume_metadata += "%d\t%s\t%s\t%s\n" % (subvolid, subvol["subvol"], subvol['mountpoint'], fsopts)
if subvolume_metadata:
fd = open("%s/.subvolume_metadata" % pdisk.mountdir, "w")
fd.write(subvolume_metadata)
fd.close()
def __get_subvolume_metadata(self, p, pdisk):
subvolume_metadata_file = "%s/.subvolume_metadata" % pdisk.mountdir
if not os.path.exists(subvolume_metadata_file):
return
fd = open(subvolume_metadata_file, "r")
content = fd.read()
fd.close()
for line in content.splitlines():
items = line.split("\t")
if items and len(items) == 4:
self.subvolumes.append({'size': 0, # In sectors
'mountpoint': items[2], # Mount relative to chroot
'fstype': "btrfs", # Filesystem type
'fsopts': items[3] + ",subvol=%s" % items[1], # Filesystem mount options
'disk_name': p['disk_name'], # physical disk name holding partition
'device': None, # kpartx device node for partition
'mount': None, # Mount object
'subvol': items[1], # Subvolume name
'boot': False, # Bootable flag
'mounted': False # Mount flag
})
def __create_subvolumes(self, p, pdisk):
""" Create all the subvolumes. """
for subvol in self.subvolumes:
argv = [ self.btrfscmd, "subvolume", "create", pdisk.mountdir + "/" + subvol["subvol"]]
rc = runner.show(argv)
if rc != 0:
raise MountError("Failed to create subvolume '%s', return code: %d." % (subvol["subvol"], rc))
# Set default subvolume, subvolume for "/" is default
subvol = None
for subvolume in self.subvolumes:
if subvolume["mountpoint"] == "/" and p['disk_name'] == subvolume['disk_name']:
subvol = subvolume
break
if subvol:
# Get default subvolume id
subvolid = self.__get_subvolume_id(pdisk.mountdir, subvol["subvol"])
# Set default subvolume
if subvolid != -1:
rc = runner.show([ self.btrfscmd, "subvolume", "set-default", "%d" % subvolid, pdisk.mountdir])
if rc != 0:
raise MountError("Failed to set default subvolume id: %d', return code: %d." % (subvolid, rc))
self.__create_subvolume_metadata(p, pdisk)
def __mount_subvolumes(self, p, pdisk):
if self.skipformat:
# Get subvolume info
self.__get_subvolume_metadata(p, pdisk)
# Set default mount options
if len(self.subvolumes) != 0:
for subvol in self.subvolumes:
if subvol["mountpoint"] == p["mountpoint"] == "/":
opts = subvol["fsopts"].split(",")
for opt in opts:
if opt.strip().startswith("subvol="):
opts.remove(opt)
break
pdisk.fsopts = ",".join(opts)
break
if len(self.subvolumes) == 0:
# Return directly if no subvolumes
return
# Remount to make default subvolume mounted
rc = runner.show([self.umountcmd, pdisk.mountdir])
if rc != 0:
raise MountError("Failed to umount %s" % pdisk.mountdir)
rc = runner.show([self.mountcmd, "-o", pdisk.fsopts, pdisk.disk.device, pdisk.mountdir])
if rc != 0:
raise MountError("Failed to umount %s" % pdisk.mountdir)
for subvol in self.subvolumes:
if subvol["mountpoint"] == "/":
continue
subvolid = self.__get_subvolume_id(pdisk.mountdir, subvol["subvol"])
if subvolid == -1:
msger.debug("WARNING: invalid subvolume %s" % subvol["subvol"])
continue
# Replace subvolume name with subvolume ID
opts = subvol["fsopts"].split(",")
for opt in opts:
if opt.strip().startswith("subvol="):
opts.remove(opt)
break
opts.extend(["subvolrootid=0", "subvol=%s" % subvol["subvol"]])
fsopts = ",".join(opts)
subvol['fsopts'] = fsopts
mountpoint = self.mountdir + subvol['mountpoint']
makedirs(mountpoint)
rc = runner.show([self.mountcmd, "-o", fsopts, pdisk.disk.device, mountpoint])
if rc != 0:
raise MountError("Failed to mount subvolume %s to %s" % (subvol["subvol"], mountpoint))
subvol["mounted"] = True
def __unmount_subvolumes(self):
""" It may be called multiple times, so we need to chekc if it is still mounted. """
for subvol in self.subvolumes:
if subvol["mountpoint"] == "/":
continue
if not subvol["mounted"]:
continue
mountpoint = self.mountdir + subvol['mountpoint']
rc = runner.show([self.umountcmd, mountpoint])
if rc != 0:
raise MountError("Failed to unmount subvolume %s from %s" % (subvol["subvol"], mountpoint))
subvol["mounted"] = False
def __create_subvolume_snapshots(self, p, pdisk):
import time
if self.snapshot_created:
return
# Remount with subvolid=0
rc = runner.show([self.umountcmd, pdisk.mountdir])
if rc != 0:
raise MountError("Failed to umount %s" % pdisk.mountdir)
if pdisk.fsopts:
mountopts = pdisk.fsopts + ",subvolid=0"
else:
mountopts = "subvolid=0"
rc = runner.show([self.mountcmd, "-o", mountopts, pdisk.disk.device, pdisk.mountdir])
if rc != 0:
raise MountError("Failed to umount %s" % pdisk.mountdir)
# Create all the subvolume snapshots
snapshotts = time.strftime("%Y%m%d-%H%M")
for subvol in self.subvolumes:
subvolpath = pdisk.mountdir + "/" + subvol["subvol"]
snapshotpath = subvolpath + "_%s-1" % snapshotts
rc = runner.show([ self.btrfscmd, "subvolume", "snapshot", subvolpath, snapshotpath ])
if rc != 0:
raise MountError("Failed to create subvolume snapshot '%s' for '%s', return code: %d." % (snapshotpath, subvolpath, rc))
self.snapshot_created = True
def __install_partition(self, num, source_file, start, size):
"""
Install source_file contents into a partition.
"""
if not source_file: # nothing to install
return
# Start is included in the size, so we need to subtract one from the end.
end = start + size - 1
msger.debug("Installed %s in partition %d, sectors %d-%d, size %d sectors" % (source_file, num, start, end, size))
dd_cmd = "dd if=%s of=%s bs=%d seek=%d count=%d conv=notrunc" % \
(source_file, self.image_file, self.sector_size, start, size)
rc, out = exec_cmd(dd_cmd)
def install(self, image_file):
msger.debug("Installing partitions")
self.image_file = image_file
for p in self.partitions:
d = self.disks[p['disk_name']]
if d['ptable_format'] == "msdos" and p['num'] == 5:
# The last sector of the 3rd partition was reserved for the EBR
# of the first _logical_ partition. This is why the extended
# partition should start one sector before the first logical
# partition.
self.__install_partition(p['num'], p['source_file'],
p['start'] - 1,
d['offset'] - p['start'])
self.__install_partition(p['num'], p['source_file'],
p['start'], p['size'])
def mount(self):
for dev in self.disks.keys():
d = self.disks[dev]
d['disk'].create()
self.__format_disks()
self.__calculate_mountorder()
return
def resparse(self, size = None):
# Can't re-sparse a disk image - too hard
pass
########NEW FILE########
__FILENAME__ = proxy
#!/usr/bin/python -tt
#
# Copyright (c) 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import urlparse
_my_proxies = {}
_my_noproxy = None
_my_noproxy_list = []
def set_proxy_environ():
global _my_noproxy, _my_proxies
if not _my_proxies:
return
for key in _my_proxies.keys():
os.environ[key + "_proxy"] = _my_proxies[key]
if not _my_noproxy:
return
os.environ["no_proxy"] = _my_noproxy
def unset_proxy_environ():
for env in ('http_proxy',
'https_proxy',
'ftp_proxy',
'all_proxy'):
if env in os.environ:
del os.environ[env]
ENV=env.upper()
if ENV in os.environ:
del os.environ[ENV]
def _set_proxies(proxy = None, no_proxy = None):
"""Return a dictionary of scheme -> proxy server URL mappings.
"""
global _my_noproxy, _my_proxies
_my_proxies = {}
_my_noproxy = None
proxies = []
if proxy:
proxies.append(("http_proxy", proxy))
if no_proxy:
proxies.append(("no_proxy", no_proxy))
# Get proxy settings from environment if not provided
if not proxy and not no_proxy:
proxies = os.environ.items()
# Remove proxy env variables, urllib2 can't handle them correctly
unset_proxy_environ()
for name, value in proxies:
name = name.lower()
if value and name[-6:] == '_proxy':
if name[0:2] != "no":
_my_proxies[name[:-6]] = value
else:
_my_noproxy = value
def _ip_to_int(ip):
ipint=0
shift=24
for dec in ip.split("."):
ipint |= int(dec) << shift
shift -= 8
return ipint
def _int_to_ip(val):
ipaddr=""
shift=0
for i in range(4):
dec = val >> shift
dec &= 0xff
ipaddr = ".%d%s" % (dec, ipaddr)
shift += 8
return ipaddr[1:]
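# Round-trip sketch for the two helpers above (doctest-style,
# illustrative values):
#
#   >>> '%x' % _ip_to_int("192.168.1.1")
#   'c0a80101'
#   >>> _int_to_ip(0xC0A80101)
#   '192.168.1.1'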
def _isip(host):
if host.replace(".", "").isdigit():
return True
return False
def _set_noproxy_list():
global _my_noproxy, _my_noproxy_list
_my_noproxy_list = []
if not _my_noproxy:
return
for item in _my_noproxy.split(","):
item = item.strip()
if not item:
continue
if item[0] != '.' and item.find("/") == -1:
# Plain hostname - needs an exact match
_my_noproxy_list.append({"match":0,"needle":item})
elif item[0] == '.':
# Need to match at tail
_my_noproxy_list.append({"match":1,"needle":item})
elif item.find("/") > 3:
# IP/MASK, need to match at head
needle = item[0:item.find("/")].strip()
ip = _ip_to_int(needle)
netmask = 0
mask = item[item.find("/")+1:].strip()
if mask.isdigit():
netmask = int(mask)
netmask = ~((1<<(32-netmask)) - 1)
ip &= netmask
else:
shift=24
netmask=0
for dec in mask.split("."):
netmask |= int(dec) << shift
shift -= 8
ip &= netmask
_my_noproxy_list.append({"match":2,"needle":ip,"netmask":netmask})
def _isnoproxy(url):
(scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
if '@' in host:
user_pass, host = host.split('@', 1)
if ':' in host:
host, port = host.split(':', 1)
hostisip = _isip(host)
for item in _my_noproxy_list:
if hostisip and item["match"] <= 1:
continue
if item["match"] == 2 and hostisip:
if (_ip_to_int(host) & item["netmask"]) == item["needle"]:
return True
if item["match"] == 0:
if host == item["needle"]:
return True
if item["match"] == 1:
if host.rfind(item["needle"]) > 0:
return True
return False
def set_proxies(proxy = None, no_proxy = None):
_set_proxies(proxy, no_proxy)
_set_noproxy_list()
set_proxy_environ()
def get_proxy_for(url):
if url.startswith('file:') or _isnoproxy(url):
return None
type = url[0:url.index(":")]
proxy = None
if _my_proxies.has_key(type):
proxy = _my_proxies[type]
elif _my_proxies.has_key("http"):
proxy = _my_proxies["http"]
else:
proxy = None
return proxy
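# A minimal usage sketch for this module (hypothetical proxy and
# no_proxy values):
#
#   >>> set_proxies("http://proxy.example.com:8080", ".example.org")
#   >>> get_proxy_for("http://build.example.org/repo") is None
#   True
#   >>> get_proxy_for("http://upstream.example.com/pkg")
#   'http://proxy.example.com:8080'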
########NEW FILE########
__FILENAME__ = rpmmisc
#!/usr/bin/python -tt
#
# Copyright (c) 2008, 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import sys
import re
import rpm
from mic import msger
from mic.utils.errors import CreatorError
from mic.utils.proxy import get_proxy_for
from mic.utils import runner
class RPMInstallCallback:
""" Command line callback class for callbacks from the RPM library.
"""
def __init__(self, ts, output=1):
self.output = output
self.callbackfilehandles = {}
self.total_actions = 0
self.total_installed = 0
self.installed_pkg_names = []
self.total_removed = 0
self.mark = "+"
self.marks = 40
self.lastmsg = None
self.tsInfo = None # this needs to be set for anything else to work
self.ts = ts
self.filelog = False
self.logString = []
self.headmsg = "Installing"
def _dopkgtup(self, hdr):
tmpepoch = hdr['epoch']
if tmpepoch is None: epoch = '0'
else: epoch = str(tmpepoch)
return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release'])
def _makeHandle(self, hdr):
handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'],
hdr['release'], hdr['arch'])
return handle
def _localprint(self, msg):
if self.output:
msger.info(msg)
def _makefmt(self, percent, progress = True):
l = len(str(self.total_actions))
size = "%s.%s" % (l, l)
fmt_done = "[%" + size + "s/%" + size + "s]"
done = fmt_done % (self.total_installed + self.total_removed,
self.total_actions)
marks = self.marks - (2 * l)
width = "%s.%s" % (marks, marks)
fmt_bar = "%-" + width + "s"
if progress:
bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
fmt = "\r %-10.10s: %-20.20s " + bar + " " + done
else:
bar = fmt_bar % (self.mark * marks, )
fmt = " %-10.10s: %-20.20s " + bar + " " + done
return fmt
def _logPkgString(self, hdr):
"""return nice representation of the package for the log"""
(n,a,e,v,r) = self._dopkgtup(hdr)
if e == '0':
pkg = '%s.%s %s-%s' % (n, a, v, r)
else:
pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r)
return pkg
def callback(self, what, bytes, total, h, user):
if what == rpm.RPMCALLBACK_TRANS_START:
if bytes == 6:
self.total_actions = total
elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
pass
elif what == rpm.RPMCALLBACK_TRANS_STOP:
pass
elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
self.lastmsg = None
hdr = None
if h is not None:
try:
hdr, rpmloc = h
except:
rpmloc = h
hdr = readRpmHeader(self.ts, h)
handle = self._makeHandle(hdr)
fd = os.open(rpmloc, os.O_RDONLY)
self.callbackfilehandles[handle]=fd
if hdr['name'] not in self.installed_pkg_names:
self.installed_pkg_names.append(hdr['name'])
self.total_installed += 1
return fd
else:
self._localprint("No header - huh?")
elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
hdr = None
if h is not None:
try:
hdr, rpmloc = h
except:
rpmloc = h
hdr = readRpmHeader(self.ts, h)
handle = self._makeHandle(hdr)
os.close(self.callbackfilehandles[handle])
fd = 0
# log stuff
#pkgtup = self._dopkgtup(hdr)
self.logString.append(self._logPkgString(hdr))
elif what == rpm.RPMCALLBACK_INST_PROGRESS:
if h is not None:
percent = (self.total_installed*100L)/self.total_actions
if total > 0:
try:
hdr, rpmloc = h
except:
rpmloc = h
m = re.match("(.*)-(\d+.*)-(\d+\.\d+)\.(.+)\.rpm", os.path.basename(rpmloc))
if m:
pkgname = m.group(1)
else:
pkgname = os.path.basename(rpmloc)
if self.output:
fmt = self._makefmt(percent)
msg = fmt % (self.headmsg, pkgname)
if msg != self.lastmsg:
self.lastmsg = msg
msger.info(msg)
if self.total_installed == self.total_actions:
msger.raw('')
msger.verbose('\n'.join(self.logString))
elif what == rpm.RPMCALLBACK_UNINST_START:
pass
elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
pass
elif what == rpm.RPMCALLBACK_UNINST_STOP:
self.total_removed += 1
elif what == rpm.RPMCALLBACK_REPACKAGE_START:
pass
elif what == rpm.RPMCALLBACK_REPACKAGE_STOP:
pass
elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
pass
def readRpmHeader(ts, filename):
""" Read an rpm header. """
fd = os.open(filename, os.O_RDONLY)
h = ts.hdrFromFdno(fd)
os.close(fd)
return h
def splitFilename(filename):
""" Pass in a standard style rpm fullname
Return a name, version, release, epoch, arch, e.g.::
foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
"""
if filename[-4:] == '.rpm':
filename = filename[:-4]
archIndex = filename.rfind('.')
arch = filename[archIndex+1:]
relIndex = filename[:archIndex].rfind('-')
rel = filename[relIndex+1:archIndex]
verIndex = filename[:relIndex].rfind('-')
ver = filename[verIndex+1:relIndex]
epochIndex = filename.find(':')
if epochIndex == -1:
epoch = ''
else:
epoch = filename[:epochIndex]
name = filename[epochIndex + 1:verIndex]
return name, ver, rel, epoch, arch
def getCanonX86Arch(arch):
#
if arch == "i586":
f = open("/proc/cpuinfo", "r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("model name") and line.find("Geode(TM)") != -1:
return "geode"
return arch
# only athlon vs i686 isn't handled with uname currently
if arch != "i686":
return arch
# if we're i686 and AuthenticAMD, then we should be an athlon
f = open("/proc/cpuinfo", "r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
return "athlon"
# i686 doesn't guarantee cmov, but we depend on it
elif line.startswith("flags") and line.find("cmov") == -1:
return "i586"
return arch
def getCanonX86_64Arch(arch):
if arch != "x86_64":
return arch
vendor = None
f = open("/proc/cpuinfo", "r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("vendor_id"):
vendor = line.split(':')[1]
break
if vendor is None:
return arch
if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:
return "amd64"
if vendor.find("GenuineIntel") != -1:
return "ia32e"
return arch
def getCanonArch():
arch = os.uname()[4]
if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):
return getCanonX86Arch(arch)
if arch == "x86_64":
return getCanonX86_64Arch(arch)
return arch
# Copy from libsatsolver:poolarch.c, with cleanup
archPolicies = {
"x86_64": "x86_64:i686:i586:i486:i386",
"i686": "i686:i586:i486:i386",
"i586": "i586:i486:i386",
"ia64": "ia64:i686:i586:i486:i386",
"armv7tnhl": "armv7tnhl:armv7thl:armv7nhl:armv7hl",
"armv7thl": "armv7thl:armv7hl",
"armv7nhl": "armv7nhl:armv7hl",
"armv7hl": "armv7hl",
"armv7l": "armv7l:armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
"armv6l": "armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
"armv5tejl": "armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
"armv5tel": "armv5tel:armv5l:armv4tl:armv4l:armv3l",
"armv5l": "armv5l:armv4tl:armv4l:armv3l",
}
# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = {
"x86_64": ( "athlon", "x86_64", "athlon" ),
}
# from yumUtils.py
arches = {
# ia32
"athlon": "i686",
"i686": "i586",
"geode": "i586",
"i586": "i486",
"i486": "i386",
"i386": "noarch",
# amd64
"x86_64": "athlon",
"amd64": "x86_64",
"ia32e": "x86_64",
# arm
"armv7tnhl": "armv7nhl",
"armv7nhl": "armv7hl",
"armv7hl": "noarch",
"armv7l": "armv6l",
"armv6l": "armv5tejl",
"armv5tejl": "armv5tel",
"armv5tel": "noarch",
#itanium
"ia64": "noarch",
}
def isMultiLibArch(arch=None):
"""returns true if arch is a multilib arch, false if not"""
if arch is None:
arch = getCanonArch()
if not arches.has_key(arch): # or we could check if it is noarch
return False
if multilibArches.has_key(arch):
return True
if multilibArches.has_key(arches[arch]):
return True
return False
def getBaseArch():
myarch = getCanonArch()
if not arches.has_key(myarch):
return myarch
if isMultiLibArch(arch=myarch):
if multilibArches.has_key(myarch):
return myarch
else:
return arches[myarch]
if arches.has_key(myarch):
basearch = myarch
value = arches[basearch]
while value != 'noarch':
basearch = value
value = arches[basearch]
return basearch
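# Sketch of the fallback chain above (assuming an ia32 host):
# getBaseArch() on "i686" walks i686 -> i586 -> i486 -> i386 and
# returns "i386", while a multilib arch such as "x86_64" is
# returned unchanged.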
def checkRpmIntegrity(bin_rpm, package):
return runner.quiet([bin_rpm, "-K", "--nosignature", package])
def checkSig(ts, package):
""" Takes a transaction set and a package, check it's sigs,
return 0 if they are all fine
return 1 if the gpg key can't be found
return 2 if the header is in someway damaged
return 3 if the key is not trusted
return 4 if the pkg is not gpg or pgp signed
"""
value = 0
currentflags = ts.setVSFlags(0)
fdno = os.open(package, os.O_RDONLY)
try:
hdr = ts.hdrFromFdno(fdno)
except rpm.error, e:
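# Older rpm versions misspell this message ("availaiable"),
# so both spellings are matched below.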
if str(e) == "public key not availaiable":
value = 1
if str(e) == "public key not available":
value = 1
if str(e) == "public key not trusted":
value = 3
if str(e) == "error reading package header":
value = 2
else:
error, siginfo = getSigInfo(hdr)
if error == 101:
os.close(fdno)
del hdr
value = 4
else:
del hdr
try:
os.close(fdno)
except OSError:
pass
ts.setVSFlags(currentflags) # put things back like they were before
return value
def getSigInfo(hdr):
""" checks signature from an hdr hand back signature information and/or
an error code
"""
import locale
locale.setlocale(locale.LC_ALL, 'C')
string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
siginfo = hdr.sprintf(string)
if siginfo != '(none)':
error = 0
sigtype, sigdate, sigid = siginfo.split(',')
else:
error = 101
sigtype = 'MD5'
sigdate = 'None'
sigid = 'None'
infotuple = (sigtype, sigdate, sigid)
return error, infotuple
def checkRepositoryEULA(name, repo):
""" This function is to check the EULA file if provided.
return True: no EULA or accepted
return False: user declined the EULA
"""
import tempfile
import shutil
import urlparse
import urllib2 as u2
import httplib
from mic.utils.errors import CreatorError
def _check_and_download_url(u2opener, url, savepath):
try:
if u2opener:
f = u2opener.open(url)
else:
f = u2.urlopen(url)
except u2.HTTPError, httperror:
if httperror.code in (404, 503):
return None
else:
raise CreatorError(httperror)
except OSError, oserr:
if oserr.errno == 2:
return None
else:
raise CreatorError(oserr)
except IOError, oserr:
if hasattr(oserr, "reason") and oserr.reason.errno == 2:
return None
else:
raise CreatorError(oserr)
except u2.URLError, err:
raise CreatorError(err)
except httplib.HTTPException, e:
raise CreatorError(e)
# save to file
licf = open(savepath, "w")
licf.write(f.read())
licf.close()
f.close()
return savepath
def _pager_file(savepath):
if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
pagers = ('w3m', 'links', 'lynx', 'less', 'more')
else:
pagers = ('less', 'more')
file_showed = False
for pager in pagers:
cmd = "%s %s" % (pager, savepath)
try:
os.system(cmd)
except OSError:
continue
else:
file_showed = True
break
if not file_showed:
f = open(savepath)
msger.raw(f.read())
f.close()
msger.pause()
# when a proxy is needed, make urllib2 use it
proxy = repo.proxy
proxy_username = repo.proxy_username
proxy_password = repo.proxy_password
if not proxy:
proxy = get_proxy_for(repo.baseurl[0])
handlers = []
auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
u2opener = None
if proxy:
if proxy_username:
proxy_netloc = urlparse.urlsplit(proxy).netloc
if proxy_password:
proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
else:
proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
else:
proxy_url = proxy
proxy_support = u2.ProxyHandler({'http': proxy_url,
'https': proxy_url,
'ftp': proxy_url})
handlers.append(proxy_support)
# download all remote files to one temp dir
baseurl = None
repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
for url in repo.baseurl:
tmphandlers = handlers[:]
(scheme, host, path, parm, query, frag) = urlparse.urlparse(url.rstrip('/') + '/')
if scheme not in ("http", "https", "ftp", "ftps", "file"):
raise CreatorError("Error: invalid url %s" % url)
if '@' in host:
try:
user_pass, host = host.split('@', 1)
if ':' in user_pass:
user, password = user_pass.split(':', 1)
except ValueError, e:
raise CreatorError('Bad URL: %s' % url)
msger.verbose("adding HTTP auth: %s, XXXXXXXX" %(user))
auth_handler.add_password(None, host, user, password)
tmphandlers.append(auth_handler)
url = scheme + "://" + host + path + parm + query + frag
if tmphandlers:
u2opener = u2.build_opener(*tmphandlers)
# try to download
repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
repo_eula_path = _check_and_download_url(
u2opener,
repo_eula_url,
os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
if repo_eula_path:
# found
baseurl = url
break
if not baseurl:
shutil.rmtree(repo_lic_dir) #cleanup
return True
# show the license file
msger.info('For the software packages in this yum repo:')
msger.info(' %s: %s' % (name, baseurl))
msger.info('There is an "End User License Agreement" file that need to be checked.')
msger.info('Please read the terms and conditions outlined in it and answer the followed qustions.')
msger.pause()
_pager_file(repo_eula_path)
# Asking for the "Accept/Decline"
if not msger.ask('Would you agree to the terms and conditions outlined in the above End User License Agreement?'):
msger.warning('Will not install pkgs from this repo.')
shutil.rmtree(repo_lic_dir) #cleanup
return False
# try to find support_info.html for extra information
repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
repo_info_path = _check_and_download_url(
u2opener,
repo_info_url,
os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
if repo_info_path:
msger.info('There is one more file in the repo with additional support information, please read it.')
msger.pause()
_pager_file(repo_info_path)
#cleanup
shutil.rmtree(repo_lic_dir)
return True
########NEW FILE########
__FILENAME__ = runner
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import subprocess
from mic import msger
def runtool(cmdln_or_args, catch=1):
""" wrapper for most of the subprocess calls
input:
cmdln_or_args: can be both args and cmdln str (shell=True)
catch: 0, quietly run
1, only STDOUT
2, only STDERR
3, both STDOUT and STDERR
return:
(rc, output)
if catch==0: the output will always be None
"""
if catch not in (0, 1, 2, 3):
# invalid catch selection, will cause exception, that's good
return None
if isinstance(cmdln_or_args, list):
cmd = cmdln_or_args[0]
shell = False
else:
import shlex
cmd = shlex.split(cmdln_or_args)[0]
shell = True
if catch != 3:
dev_null = os.open("/dev/null", os.O_WRONLY)
if catch == 0:
sout = dev_null
serr = dev_null
elif catch == 1:
sout = subprocess.PIPE
serr = dev_null
elif catch == 2:
sout = dev_null
serr = subprocess.PIPE
elif catch == 3:
sout = subprocess.PIPE
serr = subprocess.STDOUT
try:
p = subprocess.Popen(cmdln_or_args, stdout=sout,
stderr=serr, shell=shell)
(sout, serr) = p.communicate()
# combine stdout and stderr, filter None out
out = ''.join(filter(None, [sout, serr]))
except OSError, e:
if e.errno == 2:
# [Errno 2] No such file or directory
msger.error('Cannot run command: %s, missing dependency?' % cmd)
else:
raise # relay
finally:
if catch != 3:
os.close(dev_null)
return (p.returncode, out)
def show(cmdln_or_args):
# show all the message using msger.verbose
rc, out = runtool(cmdln_or_args, catch=3)
if isinstance(cmdln_or_args, list):
cmd = ' '.join(cmdln_or_args)
else:
cmd = cmdln_or_args
msg = 'running command: "%s"' % cmd
if out: out = out.strip()
if out:
msg += ', with output::'
msg += '\n +----------------'
for line in out.splitlines():
msg += '\n | %s' % line
msg += '\n +----------------'
msger.verbose(msg)
return rc
def outs(cmdln_or_args, catch=1):
# get the outputs of tools
return runtool(cmdln_or_args, catch)[1].strip()
def quiet(cmdln_or_args):
return runtool(cmdln_or_args, catch=0)[0]
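# A minimal usage sketch for this wrapper module (hypothetical
# commands, doctest-style):
#
#   >>> rc, out = runtool(['echo', 'hello'], catch=1)
#   >>> rc, out
#   (0, 'hello\n')
#   >>> quiet(['true'])
#   0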
########NEW FILE########
__FILENAME__ = __version__
VERSION = "0.14"
########NEW FILE########
__FILENAME__ = oepydevshell-internal
#!/usr/bin/env python
import os
import sys
import time
import select
import fcntl
import termios
import readline
import signal
def nonblockingfd(fd):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
def echonocbreak(fd):
old = termios.tcgetattr(fd)
old[3] = old[3] | termios.ECHO | termios.ICANON
termios.tcsetattr(fd, termios.TCSADRAIN, old)
def cbreaknoecho(fd):
old = termios.tcgetattr(fd)
old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
termios.tcsetattr(fd, termios.TCSADRAIN, old)
if len(sys.argv) != 3:
print("Incorrect parameters")
sys.exit(1)
pty = open(sys.argv[1], "w+b", 0)
parent = int(sys.argv[2])
# Don't buffer output by line endings
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
sys.stdin = os.fdopen(sys.stdin.fileno(), 'r', 0)
nonblockingfd(pty)
nonblockingfd(sys.stdin)
histfile = os.path.expanduser("~/.oedevpyshell-history")
readline.parse_and_bind("tab: complete")
try:
readline.read_history_file(histfile)
except IOError:
pass
try:
i = ""
o = ""
# Need cbreak/noecho whilst in select so we trigger on any keypress
cbreaknoecho(sys.stdin.fileno())
# Send our PID to the other end so they can kill us.
pty.write(str(os.getpid()) + "\n")
while True:
try:
writers = []
if i:
writers.append(sys.stdout)
(ready, _, _) = select.select([pty, sys.stdin], writers , [], 0)
try:
if pty in ready:
i = i + pty.read()
if i:
# Write a page at a time to avoid overflowing output
sys.stdout.write(i[:4096])
i = i[4096:]
if sys.stdin in ready:
echonocbreak(sys.stdin.fileno())
o = raw_input()
cbreaknoecho(sys.stdin.fileno())
pty.write(o + "\n")
except (IOError, OSError) as e:
if e.errno == 11:
continue
if e.errno == 5:
sys.exit(0)
raise
except EOFError:
sys.exit(0)
except KeyboardInterrupt:
os.kill(parent, signal.SIGINT)
except SystemExit:
pass
except Exception as e:
import traceback
print("Exception in oepydehshell-internal: " + str(e))
traceback.print_exc()
time.sleep(5)
finally:
readline.write_history_file(histfile)
########NEW FILE########
__FILENAME__ = opkg-query-helper
#!/usr/bin/env python
# OpenEmbedded opkg query helper utility
#
# Written by: Paul Eggleton <paul.eggleton@linux.intel.com>
#
# Copyright 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
import sys
import fileinput
import re
archmode = False
filemode = False
vermode = False
args = []
for arg in sys.argv[1:]:
if arg == '-a':
archmode = True
elif arg == '-f':
filemode = True
elif arg == '-v':
vermode = True
else:
args.append(arg)
# Regex for removing version specs after dependency items
verregex = re.compile(' \([=<>]* [^ )]*\)')
pkg = ""
ver = ""
for line in fileinput.input(args):
line = line.rstrip()
if ': ' in line:
if line.startswith("Package:"):
pkg = line.split(": ")[1]
ver = ""
else:
if archmode:
if line.startswith("Architecture:"):
arch = line.split(": ")[1]
print("%s %s" % (pkg,arch))
elif filemode:
if line.startswith("Version:"):
ver = line.split(": ")[1]
elif line.startswith("Architecture:"):
arch = line.split(": ")[1]
print("%s %s_%s_%s.ipk %s" % (pkg,pkg,ver,arch,arch))
elif vermode:
if line.startswith("Version:"):
ver = line.split(": ")[1]
elif line.startswith("Architecture:"):
arch = line.split(": ")[1]
print("%s %s %s" % (pkg,arch,ver))
else:
if line.startswith("Depends:"):
depval = line.split(": ")[1]
deps = depval.split(", ")
for dep in deps:
dep = verregex.sub('', dep)
print("%s|%s" % (pkg,dep))
elif line.startswith("Recommends:"):
recval = line.split(": ")[1]
recs = recval.split(", ")
for rec in recs:
rec = verregex.sub('', rec)
print("%s|%s [REC]" % (pkg, rec))
########NEW FILE########
__FILENAME__ = batch
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import cairo
from . import draw
from .draw import RenderOptions
def render(writer, trace, app_options, filename):
handlers = {
"png": (lambda w, h: cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h), \
lambda sfc: sfc.write_to_png(filename)),
"pdf": (lambda w, h: cairo.PDFSurface(filename, w, h), lambda sfc: 0),
"svg": (lambda w, h: cairo.SVGSurface(filename, w, h), lambda sfc: 0)
}
if app_options.format is None:
fmt = filename.rsplit('.', 1)[1]
else:
fmt = app_options.format
if not (fmt in handlers):
writer.error ("Unknown format '%s'." % fmt)
return 10
make_surface, write_surface = handlers[fmt]
options = RenderOptions (app_options)
(w, h) = draw.extents (options, 1.0, trace)
w = max (w, draw.MIN_IMG_W)
surface = make_surface (w, h)
ctx = cairo.Context (surface)
draw.render (ctx, options, 1.0, trace)
write_surface (surface)
writer.status ("bootchart written to '%s'" % filename)
########NEW FILE########
__FILENAME__ = draw
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import cairo
import math
import re
import random
import colorsys
from operator import itemgetter
class RenderOptions:
def __init__(self, app_options):
# should we render a cumulative CPU time chart
self.cumulative = True
self.charts = True
self.kernel_only = False
self.app_options = app_options
def proc_tree (self, trace):
if self.kernel_only:
return trace.kernel_tree
else:
return trace.proc_tree
# Process tree background color.
BACK_COLOR = (1.0, 1.0, 1.0, 1.0)
WHITE = (1.0, 1.0, 1.0, 1.0)
# Process tree border color.
BORDER_COLOR = (0.63, 0.63, 0.63, 1.0)
# Second tick line color.
TICK_COLOR = (0.92, 0.92, 0.92, 1.0)
# 5-second tick line color.
TICK_COLOR_BOLD = (0.86, 0.86, 0.86, 1.0)
# Annotation colour
ANNOTATION_COLOR = (0.63, 0.0, 0.0, 0.5)
# Text color.
TEXT_COLOR = (0.0, 0.0, 0.0, 1.0)
# Font family
FONT_NAME = "Bitstream Vera Sans"
# Title text font.
TITLE_FONT_SIZE = 18
# Default text font.
TEXT_FONT_SIZE = 12
# Axis label font.
AXIS_FONT_SIZE = 11
# Legend font.
LEGEND_FONT_SIZE = 12
# CPU load chart color.
CPU_COLOR = (0.40, 0.55, 0.70, 1.0)
# IO wait chart color.
IO_COLOR = (0.76, 0.48, 0.48, 0.5)
# Disk throughput color.
DISK_TPUT_COLOR = (0.20, 0.71, 0.20, 1.0)
# CPU load chart color.
FILE_OPEN_COLOR = (0.20, 0.71, 0.71, 1.0)
# Mem cached color
MEM_CACHED_COLOR = CPU_COLOR
# Mem used color
MEM_USED_COLOR = IO_COLOR
# Buffers color
MEM_BUFFERS_COLOR = (0.4, 0.4, 0.4, 0.3)
# Swap color
MEM_SWAP_COLOR = DISK_TPUT_COLOR
# Process border color.
PROC_BORDER_COLOR = (0.71, 0.71, 0.71, 1.0)
# Waiting process color.
PROC_COLOR_D = (0.76, 0.48, 0.48, 0.5)
# Running process color.
PROC_COLOR_R = CPU_COLOR
# Sleeping process color.
PROC_COLOR_S = (0.94, 0.94, 0.94, 1.0)
# Stopped process color.
PROC_COLOR_T = (0.94, 0.50, 0.50, 1.0)
# Zombie process color.
PROC_COLOR_Z = (0.71, 0.71, 0.71, 1.0)
# Dead process color.
PROC_COLOR_X = (0.71, 0.71, 0.71, 0.125)
# Paging process color.
PROC_COLOR_W = (0.71, 0.71, 0.71, 0.125)
# Process label color.
PROC_TEXT_COLOR = (0.19, 0.19, 0.19, 1.0)
# Process label font.
PROC_TEXT_FONT_SIZE = 12
# Signature color.
SIG_COLOR = (0.0, 0.0, 0.0, 0.3125)
# Signature font.
SIG_FONT_SIZE = 14
# Signature text.
SIGNATURE = "http://github.com/mmeeks/bootchart"
# Process dependency line color.
DEP_COLOR = (0.75, 0.75, 0.75, 1.0)
# Process dependency line stroke.
DEP_STROKE = 1.0
# Process description date format.
DESC_TIME_FORMAT = "mm:ss.SSS"
# Cumulative coloring bits
HSV_MAX_MOD = 31
HSV_STEP = 7
# Configure task color
TASK_COLOR_CONFIGURE = (1.0, 1.0, 0.00, 1.0)
# Compile task color.
TASK_COLOR_COMPILE = (0.0, 1.00, 0.00, 1.0)
# Install task color
TASK_COLOR_INSTALL = (1.0, 0.00, 1.00, 1.0)
# Sysroot task color
TASK_COLOR_SYSROOT = (0.0, 0.00, 1.00, 1.0)
# Package task color
TASK_COLOR_PACKAGE = (0.0, 1.00, 1.00, 1.0)
# Package Write RPM/DEB/IPK task color
TASK_COLOR_PACKAGE_WRITE = (0.0, 0.50, 0.50, 1.0)
# Process states
STATE_UNDEFINED = 0
STATE_RUNNING = 1
STATE_SLEEPING = 2
STATE_WAITING = 3
STATE_STOPPED = 4
STATE_ZOMBIE = 5
STATE_COLORS = [(0, 0, 0, 0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, \
PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
# CumulativeStats Types
STAT_TYPE_CPU = 0
STAT_TYPE_IO = 1
# Convert ps process state to an int
def get_proc_state(flag):
return "RSDTZXW".find(flag) + 1
def draw_text(ctx, text, color, x, y):
ctx.set_source_rgba(*color)
ctx.move_to(x, y)
ctx.show_text(text)
def draw_fill_rect(ctx, color, rect):
ctx.set_source_rgba(*color)
ctx.rectangle(*rect)
ctx.fill()
def draw_rect(ctx, color, rect):
ctx.set_source_rgba(*color)
ctx.rectangle(*rect)
ctx.stroke()
def draw_legend_box(ctx, label, fill_color, x, y, s):
draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_legend_line(ctx, label, fill_color, x, y, s):
draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
ctx.fill()
draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_label_in_box(ctx, color, label, x, y, w, maxx):
label_w = ctx.text_extents(label)[2]
label_x = x + w / 2 - label_w / 2
if label_w + 10 > w:
label_x = x + w + 5
if label_x + label_w > maxx:
label_x = x - label_w - 5
draw_text(ctx, label, color, label_x, y)
def draw_sec_labels(ctx, options, rect, sec_w, nsecs):
ctx.set_font_size(AXIS_FONT_SIZE)
prev_x = 0
for i in range(0, rect[2] + 1, sec_w):
if ((i / sec_w) % nsecs == 0) :
if options.app_options.as_minutes :
label = "%.1f" % (i / sec_w / 60.0)
else :
label = "%d" % (i / sec_w)
label_w = ctx.text_extents(label)[2]
x = rect[0] + i - label_w/2
if x >= prev_x:
draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
prev_x = x + label_w
def draw_box_ticks(ctx, rect, sec_w):
draw_rect(ctx, BORDER_COLOR, tuple(rect))
ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
for i in range(sec_w, rect[2] + 1, sec_w):
if ((i / sec_w) % 10 == 0) :
ctx.set_line_width(1.5)
elif sec_w < 5 :
continue
else :
ctx.set_line_width(1.0)
if ((i / sec_w) % 30 == 0) :
ctx.set_source_rgba(*TICK_COLOR_BOLD)
else :
ctx.set_source_rgba(*TICK_COLOR)
ctx.move_to(rect[0] + i, rect[1] + 1)
ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
ctx.stroke()
ctx.set_line_width(1.0)
ctx.set_line_cap(cairo.LINE_CAP_BUTT)
def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
ctx.set_source_rgba(*ANNOTATION_COLOR)
ctx.set_dash([4, 4])
for time in times:
if time is not None:
x = ((time - proc_tree.start_time) * rect[2] / proc_tree.duration)
ctx.move_to(rect[0] + x, rect[1] + 1)
ctx.line_to(rect[0] + x, rect[1] + rect[3] - 1)
ctx.stroke()
ctx.set_line_cap(cairo.LINE_CAP_BUTT)
ctx.set_dash([])
def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
ctx.set_line_width(0.5)
x_shift = proc_tree.start_time
def transform_point_coords(point, x_base, y_base, \
xscale, yscale, x_trans, y_trans):
x = (point[0] - x_base) * xscale + x_trans
y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
return x, y
max_x = max (x for (x, y) in data)
max_y = max (y for (x, y) in data)
# avoid divide by zero
if max_y == 0:
max_y = 1.0
xscale = float (chart_bounds[2]) / max_x
# If data_range is given, scale the chart so that the value range in
# data_range matches the chart bounds exactly.
# Otherwise, scale so that the actual data matches the chart bounds.
if data_range:
yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
ybase = data_range[0]
else:
yscale = float(chart_bounds[3]) / max_y
ybase = 0
first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
chart_bounds[0], chart_bounds[1])
last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
chart_bounds[0], chart_bounds[1])
ctx.set_source_rgba(*color)
ctx.move_to(*first)
for point in data:
x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
chart_bounds[0], chart_bounds[1])
ctx.line_to(x, y)
if fill:
ctx.stroke_preserve()
ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
ctx.line_to(first[0], first[1])
ctx.fill()
else:
ctx.stroke()
ctx.set_line_width(1.0)
bar_h = 55
meminfo_bar_h = 2 * bar_h
header_h = 60
# offsets
off_x, off_y = 220, 10
sec_w_base = 1 # the width of a second
proc_h = 16 # the height of a process
leg_s = 10
MIN_IMG_W = 800
CUML_HEIGHT = 2000 # Increased value to accommodate CPU and I/O Graphs
OPTIONS = None
def extents(options, xscale, trace):
start = min(trace.start.keys())
end = start
processes = 0
for proc in trace.processes:
if not options.app_options.show_all and \
trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
continue
if trace.processes[proc][1] > end:
end = trace.processes[proc][1]
processes += 1
if trace.min is not None and trace.max is not None:
start = trace.min
end = trace.max
w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
h = proc_h * processes + header_h + 2 * off_y
return (w, h)
def clip_visible(clip, rect):
xmax = max (clip[0], rect[0])
ymax = max (clip[1], rect[1])
xmin = min (clip[0] + clip[2], rect[0] + rect[2])
ymin = min (clip[1] + clip[3], rect[1] + rect[3])
return (xmin > xmax and ymin > ymax)
def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
proc_tree = options.proc_tree(trace)
# render bar legend
ctx.set_font_size(LEGEND_FONT_SIZE)
draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
# render I/O wait
chart_rect = (off_x, curr_y+30, w, bar_h)
if clip_visible (clip, chart_rect):
draw_box_ticks (ctx, chart_rect, sec_w)
draw_annotations (ctx, proc_tree, trace.times, chart_rect)
draw_chart (ctx, IO_COLOR, True, chart_rect, \
[(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
proc_tree, None)
# render CPU load
draw_chart (ctx, CPU_COLOR, True, chart_rect, \
[(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
proc_tree, None)
curr_y = curr_y + 30 + bar_h
# render second chart
draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
# render I/O utilization
chart_rect = (off_x, curr_y+30, w, bar_h)
if clip_visible (clip, chart_rect):
draw_box_ticks (ctx, chart_rect, sec_w)
draw_annotations (ctx, proc_tree, trace.times, chart_rect)
draw_chart (ctx, IO_COLOR, True, chart_rect, \
[(sample.time, sample.util) for sample in trace.disk_stats], \
proc_tree, None)
# render disk throughput
max_sample = max (trace.disk_stats, key = lambda s: s.tput)
if clip_visible (clip, chart_rect):
draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
[(sample.time, sample.tput) for sample in trace.disk_stats], \
proc_tree, None)
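        # label the peak throughput sample, nudging the label right and down
        # when the peak sits too close to the left edge of the chart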
pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
shift_x, shift_y = -20, 20
if (pos_x < off_x + 245):
shift_x, shift_y = 5, 40
label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
curr_y = curr_y + 30 + bar_h
# render mem usage
chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
mem_stats = trace.mem_stats
if mem_stats and clip_visible (clip, chart_rect):
mem_scale = max(sample.records['MemTotal'] - sample.records['MemFree'] for sample in mem_stats)
draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.records['SwapTotal'] - sample.records['SwapFree'])/1024 for sample in mem_stats]), \
MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
draw_box_ticks(ctx, chart_rect, sec_w)
draw_annotations(ctx, proc_tree, trace.times, chart_rect)
draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
[(sample.time, sample.records['MemTotal'] - sample.records['MemFree']) for sample in trace.mem_stats], \
proc_tree, [0, mem_scale])
draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
[(sample.time, sample.records['MemTotal'] - sample.records['MemFree'] - sample.records['Buffers']) for sample in mem_stats], \
proc_tree, [0, mem_scale])
draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
[(sample.time, sample.records['Cached']) for sample in mem_stats], \
proc_tree, [0, mem_scale])
draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
[(sample.time, float(sample.records['SwapTotal'] - sample.records['SwapFree'])) for sample in mem_stats], \
proc_tree, None)
curr_y = curr_y + meminfo_bar_h
return curr_y
def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
chart_rect = [off_x, curr_y+header_h, w, h - 2 * off_y - (curr_y+header_h) + proc_h]
draw_legend_box (ctx, "Configure", \
TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
draw_legend_box (ctx, "Compile", \
TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
draw_legend_box (ctx, "Install", \
TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
draw_legend_box (ctx, "Populate Sysroot", \
TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
draw_legend_box (ctx, "Package", \
TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
draw_legend_box (ctx, "Package Write",
TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
ctx.set_font_size(PROC_TEXT_FONT_SIZE)
draw_box_ticks(ctx, chart_rect, sec_w)
draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
y = curr_y+header_h
offset = trace.min or min(trace.start.keys())
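    # entries are keyed "<package>:<task>"; bars are laid out in start-time
    # order, one row per task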
for s in sorted(trace.start.keys()):
for val in sorted(trace.start[s]):
if not options.app_options.show_all and \
trace.processes[val][1] - s < options.app_options.mintime:
continue
task = val.split(":")[1]
#print val
#print trace.processes[val][1]
#print s
x = chart_rect[0] + (s - offset) * sec_w
w = ((trace.processes[val][1] - s) * sec_w)
#print "proc at %s %s %s %s" % (x, y, w, proc_h)
col = None
if task == "do_compile":
col = TASK_COLOR_COMPILE
elif task == "do_configure":
col = TASK_COLOR_CONFIGURE
elif task == "do_install":
col = TASK_COLOR_INSTALL
elif task == "do_populate_sysroot":
col = TASK_COLOR_SYSROOT
elif task == "do_package":
col = TASK_COLOR_PACKAGE
elif task == "do_package_write_rpm" or \
task == "do_package_write_deb" or \
task == "do_package_write_ipk":
col = TASK_COLOR_PACKAGE_WRITE
else:
col = WHITE
if col:
draw_fill_rect(ctx, col, (x, y, w, proc_h))
draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
draw_label_in_box(ctx, PROC_TEXT_COLOR, val, x, y + proc_h - 4, w, proc_h)
y = y + proc_h
return curr_y
#
# Render the chart.
#
def render(ctx, options, xscale, trace):
(w, h) = extents (options, xscale, trace)
global OPTIONS
OPTIONS = options.app_options
# x, y, w, h
clip = ctx.clip_extents()
sec_w = int (xscale * sec_w_base)
ctx.set_line_width(1.0)
ctx.select_font_face(FONT_NAME)
draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
w -= 2*off_x
    curr_y = off_y
    curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
    # only the build-task chart is rendered; the bootchart-style rendering
    # below this early return is currently unreachable
    return
proc_tree = options.proc_tree (trace)
# draw the title and headers
if proc_tree.idle:
duration = proc_tree.idle
else:
duration = proc_tree.duration
if not options.kernel_only:
curr_y = draw_header (ctx, trace.headers, duration)
else:
        curr_y = off_y
if options.charts:
curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
# draw process boxes
proc_height = h
if proc_tree.taskstats and options.cumulative:
proc_height -= CUML_HEIGHT
draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
curr_y, w, proc_height, sec_w)
curr_y = proc_height
ctx.set_font_size(SIG_FONT_SIZE)
draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
# draw a cumulative CPU-time-per-process graph
if proc_tree.taskstats and options.cumulative:
cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
if clip_visible (clip, cuml_rect):
draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
# draw a cumulative I/O-time-per-process graph
if proc_tree.taskstats and options.cumulative:
cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
if clip_visible (clip, cuml_rect):
draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
def draw_process_bar_chart(ctx, clip, options, proc_tree, times, curr_y, w, h, sec_w):
header_size = 0
if not options.kernel_only:
draw_legend_box (ctx, "Running (%cpu)",
PROC_COLOR_R, off_x , curr_y + 45, leg_s)
draw_legend_box (ctx, "Unint.sleep (I/O)",
PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
draw_legend_box (ctx, "Sleeping",
PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
draw_legend_box (ctx, "Zombie",
PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
header_size = 45
chart_rect = [off_x, curr_y + header_size + 15,
w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
ctx.set_font_size (PROC_TEXT_FONT_SIZE)
draw_box_ticks (ctx, chart_rect, sec_w)
if sec_w > 100:
nsec = 1
else:
nsec = 5
draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
draw_annotations (ctx, proc_tree, times, chart_rect)
y = curr_y + 60
for root in proc_tree.process_tree:
draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
y = y + proc_h * proc_tree.num_nodes([root])
def draw_header (ctx, headers, duration):
toshow = [
('system.uname', 'uname', lambda s: s),
('system.release', 'release', lambda s: s),
        ('system.cpu', 'CPU', lambda s: re.sub(r'model name\s*:\s*', '', s, 1)),
('system.kernel.options', 'kernel options', lambda s: s),
]
header_y = ctx.font_extents()[2] + 10
ctx.set_font_size(TITLE_FONT_SIZE)
draw_text(ctx, headers['title'], TEXT_COLOR, off_x, header_y)
ctx.set_font_size(TEXT_FONT_SIZE)
for (headerkey, headertitle, mangle) in toshow:
header_y += ctx.font_extents()[2]
if headerkey in headers:
value = headers.get(headerkey)
else:
value = ""
txt = headertitle + ': ' + mangle(value)
draw_text(ctx, txt, TEXT_COLOR, off_x, header_y)
dur = duration / 100.0
txt = 'time : %02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60))
if headers.get('system.maxpid') is not None:
txt = txt + ' max pid: %s' % (headers.get('system.maxpid'))
header_y += ctx.font_extents()[2]
draw_text (ctx, txt, TEXT_COLOR, off_x, header_y)
return header_y
def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
w = ((proc.duration) * rect[2] / proc_tree.duration)
draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
ipid = int(proc.pid)
if not OPTIONS.show_all:
cmdString = proc.cmd
else:
cmdString = ''
    if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0:
cmdString = cmdString + " [" + str(ipid // 1000) + "]"
if OPTIONS.show_all:
if proc.args:
cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
else:
cmdString = cmdString + " " + proc.exe
draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
next_y = y + proc_h
for child in proc.child_list:
if next_y > clip[1] + clip[3]:
break
child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
next_y = next_y + proc_h * proc_tree.num_nodes([child])
return x, y
def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip):
if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
return
draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
last_tx = -1
for sample in proc.samples :
tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
# samples are sorted chronologically
if tx < clip[0]:
continue
if tx > clip[0] + clip[2]:
break
tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
if last_tx != -1 and abs(last_tx - tx) <= tw:
tw -= last_tx - tx
tx = last_tx
tw = max (tw, 1) # nice to see at least something
last_tx = tx + tw
state = get_proc_state( sample.state )
color = STATE_COLORS[state]
if state == STATE_RUNNING:
alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
elif state == STATE_SLEEPING:
continue
draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
ctx.set_source_rgba(*DEP_COLOR)
ctx.set_dash([2, 2])
if abs(px - x) < 3:
dep_off_x = 3
dep_off_y = proc_h / 4
ctx.move_to(x, y + proc_h / 2)
ctx.line_to(px - dep_off_x, y + proc_h / 2)
ctx.line_to(px - dep_off_x, py - dep_off_y)
ctx.line_to(px, py - dep_off_y)
else:
ctx.move_to(x, y + proc_h / 2)
ctx.line_to(px, y + proc_h / 2)
ctx.line_to(px, py)
ctx.stroke()
ctx.set_dash([])
# elide the bootchart collector - it is quite distorting
def elide_bootchart(proc):
return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
class CumlSample:
def __init__(self, proc):
self.cmd = proc.cmd
self.samples = []
self.merge_samples (proc)
self.color = None
def merge_samples(self, proc):
self.samples.extend (proc.samples)
self.samples.sort (key = lambda p: p.time)
def next(self):
global palette_idx
palette_idx += HSV_STEP
return palette_idx
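    # colours are assigned lazily by stepping around the HSV hue wheel;
    # saturation and value stay fixed so adjacent bands remain distinguishable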
def get_color(self):
if self.color is None:
i = self.next() % HSV_MAX_MOD
h = 0.0
            if i != 0:
h = (1.0 * i) / HSV_MAX_MOD
s = 0.5
v = 1.0
c = colorsys.hsv_to_rgb (h, s, v)
self.color = (c[0], c[1], c[2], 1.0)
return self.color
def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
global palette_idx
palette_idx = 0
time_hash = {}
total_time = 0.0
m_proc_list = {}
    if stat_type == STAT_TYPE_CPU:
sample_value = 'cpu'
else:
sample_value = 'io'
for proc in proc_tree.process_list:
if elide_bootchart(proc):
continue
for sample in proc.samples:
total_time += getattr(sample.cpu_sample, sample_value)
            if sample.time not in time_hash:
time_hash[sample.time] = 1
# merge pids with the same cmd
        if proc.cmd not in m_proc_list:
m_proc_list[proc.cmd] = CumlSample (proc)
continue
s = m_proc_list[proc.cmd]
s.merge_samples (proc)
# all the sample times
times = sorted(time_hash)
if len (times) < 2:
print("degenerate boot chart")
return
pix_per_ns = chart_bounds[3] / total_time
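    # vertical scale: the full chart height represents the total accumulated
    # CPU (or I/O) time across all processes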
# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
# FIXME: we have duplicates in the process list too [!] - why !?
# Render bottom up, left to right
below = {}
for time in times:
below[time] = chart_bounds[1] + chart_bounds[3]
# same colors each time we render
random.seed (0)
ctx.set_line_width(1)
legends = []
labels = []
# render each pid in order
for cs in m_proc_list.values():
row = {}
cuml = 0.0
# print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
for sample in cs.samples:
cuml += getattr(sample.cpu_sample, sample_value)
row[sample.time] = cuml
process_total_time = cuml
# hide really tiny processes
if cuml * pix_per_ns <= 2:
continue
last_time = times[0]
y = last_below = below[last_time]
last_cuml = cuml = 0.0
ctx.set_source_rgba(*cs.get_color())
for time in times:
render_seg = False
# did the underlying trend increase ?
if below[time] != last_below:
last_below = below[last_time]
last_cuml = cuml
render_seg = True
            # did we move up by at least a pixel ?
if time in row:
nc = round (row[time] * pix_per_ns)
if nc != cuml:
last_cuml = cuml
cuml = nc
render_seg = True
# if last_cuml > cuml:
# assert fail ... - un-sorted process samples
# draw the trailing rectangle from the last time to
# before now, at the height of the last segment.
if render_seg:
w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
ctx.fill()
# ctx.stroke()
last_time = time
y = below [time] - cuml
row[time] = y
# render the last segment
x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
y = below[last_time] - cuml
ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
ctx.fill()
# ctx.stroke()
# render legend if it will fit
if cuml > 8:
label = cs.cmd
extnts = ctx.text_extents(label)
label_w = extnts[2]
label_h = extnts[3]
# print "Text extents %g by %g" % (label_w, label_h)
labels.append((label,
chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
y + (cuml + label_h) / 2))
if cs in legends:
print("ARGH - duplicate process in list !")
legends.append ((cs, process_total_time))
below = row
# render grid-lines over the top
draw_box_ticks(ctx, chart_bounds, sec_w)
# render labels
for l in labels:
draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
# Render legends
font_height = 20
label_width = 300
LEGENDS_PER_COL = 15
LEGENDS_TOTAL = 45
ctx.set_font_size (TITLE_FONT_SIZE)
dur_secs = duration / 100
cpu_secs = total_time / 1000000000
# misleading - with multiple CPUs ...
# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
    if stat_type == STAT_TYPE_CPU:
label = "Cumulative CPU usage, by process; total CPU: " \
" %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
else:
label = "Cumulative I/O usage, by process; total I/O: " \
" %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
chart_bounds[1] + font_height)
i = 0
legends = sorted(legends, key=itemgetter(1), reverse=True)
ctx.set_font_size(TEXT_FONT_SIZE)
for t in legends:
cs = t[0]
time = t[1]
x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
draw_legend_box(ctx, str, cs.color, x, y, leg_s)
i = i + 1
if i >= LEGENDS_TOTAL:
break
########NEW FILE########
__FILENAME__ = gui
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import gobject
import gtk
import gtk.gdk
import gtk.keysyms
from . import draw
from .draw import RenderOptions
class PyBootchartWidget(gtk.DrawingArea):
__gsignals__ = {
'expose-event': 'override',
'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
def __init__(self, trace, options, xscale):
gtk.DrawingArea.__init__(self)
self.trace = trace
self.options = options
self.set_flags(gtk.CAN_FOCUS)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
self.connect("size-allocate", self.on_allocation_size_changed)
self.connect("position-changed", self.on_position_changed)
self.zoom_ratio = 1.0
self.xscale = xscale
self.x, self.y = 0.0, 0.0
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
self.hadj = None
self.vadj = None
self.hadj_changed_signal_id = None
self.vadj_changed_signal_id = None
def do_expose_event(self, event):
cr = self.window.cairo_create()
# set a clip region for the expose event
cr.rectangle(
event.area.x, event.area.y,
event.area.width, event.area.height
)
cr.clip()
self.draw(cr, self.get_allocation())
return False
def draw(self, cr, rect):
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.scale(self.zoom_ratio, self.zoom_ratio)
cr.translate(-self.x, -self.y)
draw.render(cr, self.options, self.xscale, self.trace)
def position_changed(self):
self.emit("position-changed", self.x, self.y)
ZOOM_INCREMENT = 1.25
def zoom_image (self, zoom_ratio):
self.zoom_ratio = zoom_ratio
self._set_scroll_adjustments (self.hadj, self.vadj)
self.queue_draw()
def zoom_to_rect (self, rect):
zoom_ratio = float(rect.width)/float(self.chart_width)
self.zoom_image(zoom_ratio)
self.x = 0
self.position_changed()
def set_xscale(self, xscale):
old_mid_x = self.x + self.hadj.page_size / 2
self.xscale = xscale
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
new_x = old_mid_x
self.zoom_image (self.zoom_ratio)
def on_expand(self, action):
self.set_xscale (int(self.xscale * 1.5 + 0.5))
def on_contract(self, action):
self.set_xscale (max(int(self.xscale / 1.5), 1))
def on_zoom_in(self, action):
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
def on_zoom_out(self, action):
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
def on_zoom_fit(self, action):
self.zoom_to_rect(self.get_allocation())
def on_zoom_100(self, action):
self.zoom_image(1.0)
self.set_xscale(1.0)
def show_toggled(self, button):
self.options.app_options.show_all = button.get_property ('active')
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
self._set_scroll_adjustments(self.hadj, self.vadj)
self.queue_draw()
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
if event.keyval == gtk.keysyms.Left:
self.x -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Right:
self.x += self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Up:
self.y -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Down:
self.y += self.POS_INCREMENT/self.zoom_ratio
else:
return False
self.queue_draw()
self.position_changed()
return True
def on_area_button_press(self, area, event):
if event.button == 2 or event.button == 1:
area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
        # neither branch of the original event-type check consumed the event,
        # so simply decline it
        return False
def on_area_button_release(self, area, event):
if event.button == 2 or event.button == 1:
area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
return False
def on_area_scroll_event(self, area, event):
if event.state & gtk.gdk.CONTROL_MASK:
if event.direction == gtk.gdk.SCROLL_UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
return True
if event.direction == gtk.gdk.SCROLL_DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
return True
return False
def on_area_motion_notify(self, area, event):
state = event.state
if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
return True
def on_set_scroll_adjustments(self, area, hadj, vadj):
self._set_scroll_adjustments (hadj, vadj)
def on_allocation_size_changed(self, widget, allocation):
self.hadj.page_size = allocation.width
self.hadj.page_increment = allocation.width * 0.9
self.vadj.page_size = allocation.height
self.vadj.page_increment = allocation.height * 0.9
def _set_adj_upper(self, adj, upper):
changed = False
value_changed = False
if adj.upper != upper:
adj.upper = upper
changed = True
max_value = max(0.0, upper - adj.page_size)
if adj.value > max_value:
adj.value = max_value
value_changed = True
if changed:
adj.changed()
if value_changed:
adj.value_changed()
    def _set_scroll_adjustments(self, hadj, vadj):
        if hadj is None:
            hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        if vadj is None:
            vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        if self.hadj_changed_signal_id is not None and \
           self.hadj is not None and hadj != self.hadj:
            self.hadj.disconnect (self.hadj_changed_signal_id)
        if self.vadj_changed_signal_id is not None and \
           self.vadj is not None and vadj != self.vadj:
            self.vadj.disconnect (self.vadj_changed_signal_id)
        if hadj is not None:
            self.hadj = hadj
            self._set_adj_upper (self.hadj, self.zoom_ratio * self.chart_width)
            self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
        if vadj is not None:
            self.vadj = vadj
            self._set_adj_upper (self.vadj, self.zoom_ratio * self.chart_height)
            self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)
def on_adjustments_changed(self, adj):
self.x = self.hadj.value / self.zoom_ratio
self.y = self.vadj.value / self.zoom_ratio
self.queue_draw()
def on_position_changed(self, widget, x, y):
self.hadj.value = x * self.zoom_ratio
self.vadj.value = y * self.zoom_ratio
PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
class PyBootchartShell(gtk.VBox):
ui = '''
<ui>
<toolbar name="ToolBar">
<toolitem action="Expand"/>
<toolitem action="Contract"/>
<separator/>
<toolitem action="ZoomIn"/>
<toolitem action="ZoomOut"/>
<toolitem action="ZoomFit"/>
<toolitem action="Zoom100"/>
</toolbar>
</ui>
'''
def __init__(self, window, trace, options, xscale):
gtk.VBox.__init__(self)
self.widget = PyBootchartWidget(trace, options, xscale)
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
# Add the accelerator group to the toplevel window
accelgroup = uimanager.get_accel_group()
window.add_accel_group(accelgroup)
# Create an ActionGroup
actiongroup = gtk.ActionGroup('Actions')
self.actiongroup = actiongroup
# Create actions
actiongroup.add_actions((
('Expand', gtk.STOCK_ADD, None, None, None, self.widget.on_expand),
('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget.on_contract),
('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
))
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
# Add a UI description
uimanager.add_ui_from_string(self.ui)
# Scrolled window
scrolled = gtk.ScrolledWindow()
scrolled.add(self.widget)
# toolbar / h-box
hbox = gtk.HBox(False, 8)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
hbox.pack_start(toolbar, True, True)
if not options.kernel_only:
# Misc. options
button = gtk.CheckButton("Show more")
button.connect ('toggled', self.widget.show_toggled)
button.set_active(options.app_options.show_all)
hbox.pack_start (button, False, True)
self.pack_start(hbox, False)
self.pack_start(scrolled)
self.show_all()
def grab_focus(self, window):
window.set_focus(self.widget)
class PyBootchartWindow(gtk.Window):
def __init__(self, trace, app_options):
gtk.Window.__init__(self)
window = self
window.set_title("Bootchart %s" % trace.filename)
window.set_default_size(750, 550)
tab_page = gtk.Notebook()
tab_page.show()
window.add(tab_page)
full_opts = RenderOptions(app_options)
full_tree = PyBootchartShell(window, trace, full_opts, 1.0)
tab_page.append_page (full_tree, gtk.Label("Full tree"))
if trace.kernel is not None and len (trace.kernel) > 2:
kernel_opts = RenderOptions(app_options)
kernel_opts.cumulative = False
kernel_opts.charts = False
kernel_opts.kernel_only = True
kernel_tree = PyBootchartShell(window, trace, kernel_opts, 5.0)
tab_page.append_page (kernel_tree, gtk.Label("Kernel boot"))
full_tree.grab_focus(self)
self.show()
def show(trace, options):
win = PyBootchartWindow(trace, options)
win.connect('destroy', gtk.main_quit)
gtk.main()
########NEW FILE########
__FILENAME__ = main
main.py.in
########NEW FILE########
__FILENAME__ = parsing
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import string
import re
import sys
import tarfile
try:
    from time import perf_counter as clock  # time.clock was removed in Python 3.8
except ImportError:
    from time import clock # Python 2
from collections import defaultdict
from functools import reduce
from .samples import *
from .process_tree import ProcessTree
if sys.version_info >= (3, 0):
long = int
# Parsing produces as its end result a 'Trace'
class Trace:
def __init__(self, writer, paths, options):
self.processes = {}
self.start = {}
self.end = {}
self.min = None
self.max = None
self.headers = None
self.disk_stats = None
self.ps_stats = None
self.taskstats = None
self.cpu_stats = None
self.cmdline = None
self.kernel = None
self.kernel_tree = None
self.filename = None
self.parent_map = None
self.mem_stats = None
if len(paths):
parse_paths (writer, self, paths)
if not self.valid():
raise ParseError("empty state: '%s' does not contain a valid bootchart" % ", ".join(paths))
if options.full_time:
self.min = min(self.start.keys())
self.max = max(self.end.keys())
return
# Turn that parsed information into something more useful
# link processes into a tree of pointers, calculate statistics
self.compile(writer)
# Crop the chart to the end of the first idle period after the given
# process
if options.crop_after:
idle = self.crop (writer, options.crop_after)
else:
idle = None
# Annotate other times as the first start point of given process lists
self.times = [ idle ]
if options.annotate:
for procnames in options.annotate:
names = [x[:15] for x in procnames.split(",")]
for proc in self.ps_stats.process_map.values():
if proc.cmd in names:
self.times.append(proc.start_time)
break
else:
self.times.append(None)
self.proc_tree = ProcessTree(writer, self.kernel, self.ps_stats,
self.ps_stats.sample_period,
self.headers.get("profile.process"),
options.prune, idle, self.taskstats,
self.parent_map is not None)
if self.kernel is not None:
self.kernel_tree = ProcessTree(writer, self.kernel, None, 0,
self.headers.get("profile.process"),
False, None, None, True)
    def valid(self):
        # the original bootchart check (headers, disk, ps and cpu stats all
        # present) is bypassed here; only the build-task state matters
        return len(self.processes) != 0
def add_process(self, process, start, end):
self.processes[process] = [start, end]
if start not in self.start:
self.start[start] = []
if process not in self.start[start]:
self.start[start].append(process)
if end not in self.end:
self.end[end] = []
if process not in self.end[end]:
self.end[end].append(process)
def compile(self, writer):
def find_parent_id_for(pid):
            if pid == 0:
return 0
ppid = self.parent_map.get(pid)
if ppid:
# many of these double forks are so short lived
# that we have no samples, or process info for them
                # so climb the parent hierarchy to find one
if int (ppid * 1000) not in self.ps_stats.process_map:
# print "Pid '%d' short lived with no process" % ppid
ppid = find_parent_id_for (ppid)
# else:
# print "Pid '%d' has an entry" % ppid
else:
# print "Pid '%d' missing from pid map" % pid
return 0
return ppid
# merge in the cmdline data
if self.cmdline is not None:
for proc in self.ps_stats.process_map.values():
rpid = int (proc.pid // 1000)
if rpid in self.cmdline:
cmd = self.cmdline[rpid]
proc.exe = cmd['exe']
proc.args = cmd['args']
# else:
# print "proc %d '%s' not in cmdline" % (rpid, proc.exe)
# re-parent any stray orphans if we can
if self.parent_map is not None:
for process in self.ps_stats.process_map.values():
ppid = find_parent_id_for (int(process.pid // 1000))
if ppid:
process.ppid = ppid * 1000
# stitch the tree together with pointers
for process in self.ps_stats.process_map.values():
process.set_parent (self.ps_stats.process_map)
# count on fingers variously
for process in self.ps_stats.process_map.values():
process.calc_stats (self.ps_stats.sample_period)
def crop(self, writer, crop_after):
def is_idle_at(util, start, j):
k = j + 1
while k < len(util) and util[k][0] < start + 300:
k += 1
k = min(k, len(util)-1)
if util[j][1] >= 0.25:
return False
avgload = sum(u[1] for u in util[j:k+1]) / (k-j+1)
if avgload < 0.25:
return True
else:
return False
def is_idle(util, start):
for j in range(0, len(util)):
if util[j][0] < start:
continue
return is_idle_at(util, start, j)
else:
return False
        names = [x[:15] for x in crop_after.split(",")]
        for proc in self.ps_stats.process_map.values():
            if proc.cmd in names or proc.exe in names:
                writer.info("selected proc '%s' from list (start %d)"
                            % (proc.cmd, proc.start_time))
                break
        else:
            # the loop variable is never None after iteration, so use
            # for/else to detect that nothing matched
            writer.warn("no selected crop proc '%s' in list" % crop_after)
cpu_util = [(sample.time, sample.user + sample.sys + sample.io) for sample in self.cpu_stats]
disk_util = [(sample.time, sample.util) for sample in self.disk_stats]
idle = None
for i in range(0, len(cpu_util)):
if cpu_util[i][0] < proc.start_time:
continue
if is_idle_at(cpu_util, cpu_util[i][0], i) \
and is_idle(disk_util, cpu_util[i][0]):
idle = cpu_util[i][0]
break
if idle is None:
writer.warn ("not idle after proc '%s'" % crop_after)
return None
crop_at = idle + 300
writer.info ("cropping at time %d" % crop_at)
while len (self.cpu_stats) \
and self.cpu_stats[-1].time > crop_at:
self.cpu_stats.pop()
while len (self.disk_stats) \
and self.disk_stats[-1].time > crop_at:
self.disk_stats.pop()
self.ps_stats.end_time = crop_at
cropped_map = {}
for key, value in self.ps_stats.process_map.items():
if (value.start_time <= crop_at):
cropped_map[key] = value
for proc in cropped_map.values():
proc.duration = min (proc.duration, crop_at - proc.start_time)
while len (proc.samples) \
and proc.samples[-1].time > crop_at:
proc.samples.pop()
self.ps_stats.process_map = cropped_map
return idle
class ParseError(Exception):
"""Represents errors during parse of the bootchart."""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def _parse_headers(file):
"""Parses the headers of the bootchart."""
def parse(acc, line):
(headers, last) = acc
if '=' in line:
last, value = map (lambda x: x.strip(), line.split('=', 1))
else:
value = line.strip()
headers[last] += value
return headers, last
return reduce(parse, file.read().decode('utf-8').split('\n'), (defaultdict(str),''))[0]
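# For illustration, a headers file is a sequence of "key = value" lines;
# lines without '=' are treated as continuations of the previous key, e.g.:
#   system.uname = Linux 3.x ...
#   system.cpu = Intel(R) Core(TM) ...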
def _parse_timed_blocks(file):
"""Parses (ie., splits) a file into so-called timed-blocks. A
timed-block consists of a timestamp on a line by itself followed
by zero or more lines of data for that point in time."""
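    # e.g. a minimal timed-block (blocks are separated by blank lines):
    #   200
    #   <data line>
    #   <data line>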
def parse(block):
lines = block.split('\n')
if not lines:
            raise ParseError('expected a timed-block consisting of a timestamp followed by data lines')
try:
return (int(lines[0]), lines[1:])
except ValueError:
raise ParseError("expected a timed-block, but timestamp '%s' is not an integer" % lines[0])
blocks = file.read().decode('utf-8').split('\n\n')
return [parse(block) for block in blocks if block.strip() and not block.endswith(' not running\n')]
def _parse_proc_ps_log(writer, file):
"""
* See proc(5) for details.
*
* {pid, comm, state, ppid, pgrp, session, tty_nr, tpgid, flags, minflt, cminflt, majflt, cmajflt, utime, stime,
* cutime, cstime, priority, nice, 0, itrealvalue, starttime, vsize, rss, rlim, startcode, endcode, startstack,
* kstkesp, kstkeip}
"""
processMap = {}
ltime = 0
timed_blocks = _parse_timed_blocks(file)
for time, lines in timed_blocks:
for line in lines:
if not line: continue
tokens = line.split(' ')
if len(tokens) < 21:
continue
offset = [index for index, token in enumerate(tokens[1:]) if token[-1] == ')'][0]
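            # the "(comm)" field may itself contain spaces, so find the token
            # ending in ')' before indexing the remaining fixed-position columns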
pid, cmd, state, ppid = int(tokens[0]), ' '.join(tokens[1:2+offset]), tokens[2+offset], int(tokens[3+offset])
userCpu, sysCpu, stime = int(tokens[13+offset]), int(tokens[14+offset]), int(tokens[21+offset])
# magic fixed point-ness ...
pid *= 1000
ppid *= 1000
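            # the x1000 scaling appears to leave room for derived unique ids
            # (renamed/split processes get pid+1 slots in _parse_taskstats_log)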
if pid in processMap:
process = processMap[pid]
process.cmd = cmd.strip('()') # why rename after latest name??
else:
process = Process(writer, pid, cmd.strip('()'), ppid, min(time, stime))
processMap[pid] = process
if process.last_user_cpu_time is not None and process.last_sys_cpu_time is not None and ltime is not None:
userCpuLoad, sysCpuLoad = process.calc_load(userCpu, sysCpu, max(1, time - ltime))
cpuSample = CPUSample('null', userCpuLoad, sysCpuLoad, 0.0)
process.samples.append(ProcessSample(time, state, cpuSample))
process.last_user_cpu_time = userCpu
process.last_sys_cpu_time = sysCpu
ltime = time
if len (timed_blocks) < 2:
return None
startTime = timed_blocks[0][0]
avgSampleLength = (ltime - startTime)/(len (timed_blocks) - 1)
return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime)
def _parse_taskstats_log(writer, file):
"""
* See bootchart-collector.c for details.
*
* { pid, ppid, comm, cpu_run_real_total, blkio_delay_total, swapin_delay_total }
*
"""
processMap = {}
pidRewrites = {}
ltime = None
timed_blocks = _parse_timed_blocks(file)
for time, lines in timed_blocks:
# we have no 'stime' from taskstats, so prep 'init'
if ltime is None:
process = Process(writer, 1, '[init]', 0, 0)
processMap[1000] = process
ltime = time
# continue
for line in lines:
if not line: continue
tokens = line.split(' ')
if len(tokens) != 6:
continue
opid, ppid, cmd = int(tokens[0]), int(tokens[1]), tokens[2]
cpu_ns, blkio_delay_ns, swapin_delay_ns = long(tokens[-3]), long(tokens[-2]), long(tokens[-1]),
# make space for trees of pids
opid *= 1000
ppid *= 1000
# when the process name changes, we re-write the pid.
if opid in pidRewrites:
pid = pidRewrites[opid]
else:
pid = opid
cmd = cmd.strip('(').strip(')')
if pid in processMap:
process = processMap[pid]
if process.cmd != cmd:
pid += 1
pidRewrites[opid] = pid
# print "process mutation ! '%s' vs '%s' pid %s -> pid %s\n" % (process.cmd, cmd, opid, pid)
process = process.split (writer, pid, cmd, ppid, time)
processMap[pid] = process
else:
                    process.cmd = cmd
else:
process = Process(writer, pid, cmd, ppid, time)
processMap[pid] = process
delta_cpu_ns = (float) (cpu_ns - process.last_cpu_ns)
delta_blkio_delay_ns = (float) (blkio_delay_ns - process.last_blkio_delay_ns)
delta_swapin_delay_ns = (float) (swapin_delay_ns - process.last_swapin_delay_ns)
# make up some state data ...
if delta_cpu_ns > 0:
state = "R"
elif delta_blkio_delay_ns + delta_swapin_delay_ns > 0:
state = "D"
else:
state = "S"
            # retain the raw ns timings in a CPUSample, which the old-style
            # code treats as a percentage of CPU used in this time-slice
if delta_cpu_ns + delta_blkio_delay_ns + delta_swapin_delay_ns > 0:
# print "proc %s cpu_ns %g delta_cpu %g" % (cmd, cpu_ns, delta_cpu_ns)
cpuSample = CPUSample('null', delta_cpu_ns, 0.0,
delta_blkio_delay_ns,
delta_swapin_delay_ns)
process.samples.append(ProcessSample(time, state, cpuSample))
process.last_cpu_ns = cpu_ns
process.last_blkio_delay_ns = blkio_delay_ns
process.last_swapin_delay_ns = swapin_delay_ns
ltime = time
if len (timed_blocks) < 2:
return None
startTime = timed_blocks[0][0]
avgSampleLength = (ltime - startTime)/(len(timed_blocks)-1)
return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime)
def _parse_proc_stat_log(file):
samples = []
ltimes = None
for time, lines in _parse_timed_blocks(file):
        # skip empty lines
if not lines:
continue
# CPU times {user, nice, system, idle, io_wait, irq, softirq}
tokens = lines[0].split()
times = [ int(token) for token in tokens[1:] ]
if ltimes:
user = float((times[0] + times[1]) - (ltimes[0] + ltimes[1]))
system = float((times[2] + times[5] + times[6]) - (ltimes[2] + ltimes[5] + ltimes[6]))
idle = float(times[3] - ltimes[3])
iowait = float(times[4] - ltimes[4])
aSum = max(user + system + idle + iowait, 1)
samples.append( CPUSample(time, user/aSum, system/aSum, iowait/aSum) )
ltimes = times
# skip the rest of statistics lines
return samples
def _parse_proc_disk_stat_log(file, numCpu):
"""
Parse file for disk stats, but only look at the whole device, eg. sda,
not sda1, sda2 etc. The format of relevant lines should be:
{major minor name rio rmerge rsect ruse wio wmerge wsect wuse running use aveq}
"""
    disk_regex_re = re.compile (r'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
# this gets called an awful lot.
def is_relevant_line(linetokens):
if len(linetokens) != 14:
return False
disk = linetokens[2]
return disk_regex_re.match(disk)
disk_stat_samples = []
for time, lines in _parse_timed_blocks(file):
sample = DiskStatSample(time)
relevant_tokens = [linetokens for linetokens in map (lambda x: x.split(),lines) if is_relevant_line(linetokens)]
for tokens in relevant_tokens:
disk, rsect, wsect, use = tokens[2], int(tokens[5]), int(tokens[9]), int(tokens[12])
sample.add_diskdata([rsect, wsect, use])
disk_stat_samples.append(sample)
disk_stats = []
for sample1, sample2 in zip(disk_stat_samples[:-1], disk_stat_samples[1:]):
interval = sample1.time - sample2.time
if interval == 0:
interval = 1
sums = [ a - b for a, b in zip(sample1.diskdata, sample2.diskdata) ]
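        # sample1 precedes sample2, so both 'interval' and 'sums' come out
        # negative; the divisions below therefore yield positive rates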
readTput = sums[0] / 2.0 * 100.0 / interval
writeTput = sums[1] / 2.0 * 100.0 / interval
util = float( sums[2] ) / 10 / interval / numCpu
util = max(0.0, min(1.0, util))
disk_stats.append(DiskSample(sample2.time, readTput, writeTput, util))
return disk_stats
def _parse_proc_meminfo_log(file):
"""
Parse file for global memory statistics.
The format of relevant lines should be: ^key: value( unit)?
"""
used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
mem_stats = []
meminfo_re = re.compile(r'([^ \t:]+):\s*(\d+).*')
for time, lines in _parse_timed_blocks(file):
sample = MemSample(time)
for line in lines:
match = meminfo_re.match(line)
if not match:
raise ParseError("Invalid meminfo line \"%s\"" % match.groups(0))
sample.add_value(match.group(1), int(match.group(2)))
if sample.valid():
mem_stats.append(sample)
return mem_stats
# if we boot the kernel with: initcall_debug printk.time=1 we can
# get all manner of interesting data from the dmesg output
# We turn this into a pseudo-process tree: each event is
# characterised by a
# we don't try to detect a "kernel finished" state - since the kernel
# continues to do interesting things after init is called.
#
# sample input:
# [ 0.000000] ACPI: FACP 3f4fc000 000F4 (v04 INTEL Napa 00000001 MSFT 01000013)
# ...
# [ 0.039993] calling migration_init+0x0/0x6b @ 1
# [ 0.039993] initcall migration_init+0x0/0x6b returned 1 after 0 usecs
def _parse_dmesg(writer, file):
    timestamp_re = re.compile (r"^\[\s*(\d+\.\d+)\s*]\s+(.*)$")
    split_re = re.compile (r"^(\S+)\s+([\S\+_-]+) (.*)$")
processMap = {}
idx = 0
inc = 1.0 / 1000000
kernel = Process(writer, idx, "k-boot", 0, 0.1)
processMap['k-boot'] = kernel
base_ts = False
max_ts = 0
for line in file.read().decode('utf-8').split('\n'):
t = timestamp_re.match (line)
if t is None:
# print "duff timestamp " + line
continue
time_ms = float (t.group(1)) * 1000
# looks like we may have a huge diff after the clock
# has been set up. This could lead to huge graph:
# so huge we will be killed by the OOM.
# So instead of using the plain timestamp we will
# use a delta to first one and skip the first one
# for convenience
if max_ts == 0 and not base_ts and time_ms > 1000:
base_ts = time_ms
continue
max_ts = max(time_ms, max_ts)
if base_ts:
# print "fscked clock: used %f instead of %f" % (time_ms - base_ts, time_ms)
time_ms -= base_ts
m = split_re.match (t.group(2))
if m is None:
continue
# print "match: '%s'" % (m.group(1))
type = m.group(1)
func = m.group(2)
rest = m.group(3)
if t.group(2).startswith ('Write protecting the') or \
t.group(2).startswith ('Freeing unused kernel memory'):
kernel.duration = time_ms / 10
continue
# print "foo: '%s' '%s' '%s'" % (type, func, rest)
if type == "calling":
ppid = kernel.pid
            p = re.match (r"@ (\d+)", rest)
if p is not None:
ppid = float (p.group(1)) // 1000
# print "match: '%s' ('%g') at '%s'" % (func, ppid, time_ms)
name = func.split ('+', 1) [0]
idx += inc
processMap[func] = Process(writer, ppid + idx, name, ppid, time_ms / 10)
elif type == "initcall":
# print "finished: '%s' at '%s'" % (func, time_ms)
if func in processMap:
process = processMap[func]
process.duration = (time_ms / 10) - process.start_time
else:
print("corrupted init call for %s" % (func))
elif type == "async_waiting" or type == "async_continuing":
continue # ignore
return processMap.values()
#
# Parse binary pacct accounting file output if we have one
# cf. /usr/include/linux/acct.h
#
def _parse_pacct(writer, file):
    # read a little-endian int32; bytearray indexing yields ints on both
    # Python 2 and 3 (ord() on a plain bytes index would break on Python 3)
    def _read_le_int32(file):
        byts = bytearray(file.read(4))
        return byts[0] | (byts[1] << 8) | \
               (byts[2] << 16) | (byts[3] << 24)
parent_map = {}
parent_map[0] = 0
    while file.read(1) != b"": # ignore flags
ver = file.read(1)
if ord(ver) < 3:
print("Invalid version 0x%x" % (ord(ver)))
return None
file.seek (14, 1) # user, group etc.
pid = _read_le_int32 (file)
ppid = _read_le_int32 (file)
# print "Parent of %d is %d" % (pid, ppid)
parent_map[pid] = ppid
file.seek (4 + 4 + 16, 1) # timings
file.seek (16, 1) # acct_comm
return parent_map
def _parse_paternity_log(writer, file):
parent_map = {}
parent_map[0] = 0
for line in file.read().decode('utf-8').split('\n'):
if not line:
continue
elems = line.split(' ') # <Child> <Parent>
if len (elems) >= 2:
# print "paternity of %d is %d" % (int(elems[0]), int(elems[1]))
parent_map[int(elems[0])] = int(elems[1])
else:
print("Odd paternity line '%s'" % (line))
return parent_map
def _parse_cmdline_log(writer, file):
cmdLines = {}
for block in file.read().decode('utf-8').split('\n\n'):
lines = block.split('\n')
if len (lines) >= 3:
# print "Lines '%s'" % (lines[0])
pid = int (lines[0])
values = {}
values['exe'] = lines[1].lstrip(':')
args = lines[2].lstrip(':').split('\0')
args.pop()
values['args'] = args
cmdLines[pid] = values
return cmdLines
def get_num_cpus(headers):
"""Get the number of CPUs from the system.cpu header property. As the
CPU utilization graphs are relative, the number of CPUs currently makes
no difference."""
if headers is None:
return 1
if headers.get("system.cpu.num"):
return max (int (headers.get("system.cpu.num")), 1)
cpu_model = headers.get("system.cpu")
if cpu_model is None:
return 1
mat = re.match(".*\\((\\d+)\\)", cpu_model)
if mat is None:
return 1
return max (int(mat.group(1)), 1)
def _do_parse(writer, state, filename, file):
writer.info("parsing '%s'" % filename)
t1 = clock()
paths = filename.split("/")
task = paths[-1]
pn = paths[-2]
start = None
end = None
for line in file:
if line.startswith("Started:"):
start = int(float(line.split()[-1]))
elif line.startswith("Ended:"):
end = int(float(line.split()[-1]))
if start and end:
state.add_process(pn + ":" + task, start, end)
t2 = clock()
writer.info(" %s seconds" % str(t2-t1))
return state
def parse_file(writer, state, filename):
if state.filename is None:
state.filename = filename
basename = os.path.basename(filename)
with open(filename, "rb") as file:
return _do_parse(writer, state, filename, file)
def parse_paths(writer, state, paths):
for path in paths:
if state.filename is None:
state.filename = path
root, extension = os.path.splitext(path)
        if not os.path.exists(path):
writer.warn("warning: path '%s' does not exist, ignoring." % path)
continue
#state.filename = path
if os.path.isdir(path):
files = sorted([os.path.join(path, f) for f in os.listdir(path)])
state = parse_paths(writer, state, files)
elif extension in [".tar", ".tgz", ".gz"]:
if extension == ".gz":
root, extension = os.path.splitext(root)
if extension != ".tar":
writer.warn("warning: can only handle zipped tar files, not zipped '%s'-files; ignoring" % extension)
continue
tf = None
try:
writer.status("parsing '%s'" % path)
tf = tarfile.open(path, 'r:*')
for name in tf.getnames():
state = _do_parse(writer, state, name, tf.extractfile(name))
except tarfile.ReadError as error:
raise ParseError("error: could not read tarfile '%s': %s." % (path, error))
finally:
if tf != None:
tf.close()
else:
state = parse_file(writer, state, path)
return state
def split_res(res, options):
""" Split the res into n pieces """
res_list = []
if options.num > 1:
s_list = sorted(res.start.keys())
frag_size = len(s_list) / float(options.num)
# Need the top value
if frag_size > int(frag_size):
frag_size = int(frag_size + 1)
else:
frag_size = int(frag_size)
start = 0
end = frag_size
while start < end:
state = Trace(None, [], None)
if options.full_time:
state.min = min(res.start.keys())
state.max = max(res.end.keys())
for i in range(start, end):
# Add this line for reference
#state.add_process(pn + ":" + task, start, end)
for p in res.start[s_list[i]]:
state.add_process(p, s_list[i], res.processes[p][1])
start = end
end = end + frag_size
if end > len(s_list):
end = len(s_list)
res_list.append(state)
else:
res_list.append(res)
return res_list
########NEW FILE########
__FILENAME__ = process_tree
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
class ProcessTree:
"""ProcessTree encapsulates a process tree. The tree is built from log files
retrieved during the boot process. When building the process tree, it is
pruned and merged in order to be able to visualize it in a comprehensible
manner.
The following pruning techniques are used:
* idle processes that keep running during the last process sample
        (which is a heuristic for background processes) are removed,
* short-lived processes (i.e. processes that only live for the
duration of two samples or less) are removed,
* the processes used by the boot logger are removed,
* exploders (i.e. processes that are known to spawn huge meaningless
process subtrees) have their subtrees merged together,
* siblings (i.e. processes with the same command line living
concurrently -- thread heuristic) are merged together,
* process runs (unary trees with processes sharing the command line)
are merged together.
"""
LOGGER_PROC = 'bootchart-colle'
EXPLODER_PROCESSES = set(['hwup'])
def __init__(self, writer, kernel, psstats, sample_period,
monitoredApp, prune, idle, taskstats,
accurate_parentage, for_testing = False):
self.writer = writer
self.process_tree = []
self.taskstats = taskstats
if psstats is None:
process_list = kernel
elif kernel is None:
process_list = psstats.process_map.values()
else:
process_list = list(kernel) + list(psstats.process_map.values())
self.process_list = sorted(process_list, key = lambda p: p.pid)
self.sample_period = sample_period
self.build()
if not accurate_parentage:
self.update_ppids_for_daemons(self.process_list)
self.start_time = self.get_start_time(self.process_tree)
self.end_time = self.get_end_time(self.process_tree)
self.duration = self.end_time - self.start_time
self.idle = idle
if for_testing:
return
removed = self.merge_logger(self.process_tree, self.LOGGER_PROC, monitoredApp, False)
writer.status("merged %i logger processes" % removed)
if prune:
p_processes = self.prune(self.process_tree, None)
p_exploders = self.merge_exploders(self.process_tree, self.EXPLODER_PROCESSES)
p_threads = self.merge_siblings(self.process_tree)
p_runs = self.merge_runs(self.process_tree)
writer.status("pruned %i process, %i exploders, %i threads, and %i runs" % (p_processes, p_exploders, p_threads, p_runs))
self.sort(self.process_tree)
self.start_time = self.get_start_time(self.process_tree)
self.end_time = self.get_end_time(self.process_tree)
self.duration = self.end_time - self.start_time
self.num_proc = self.num_nodes(self.process_tree)
def build(self):
"""Build the process tree from the list of top samples."""
self.process_tree = []
for proc in self.process_list:
if not proc.parent:
self.process_tree.append(proc)
else:
proc.parent.child_list.append(proc)
def sort(self, process_subtree):
"""Sort process tree."""
for p in process_subtree:
p.child_list.sort(key = lambda p: p.pid)
self.sort(p.child_list)
def num_nodes(self, process_list):
"Counts the number of nodes in the specified process tree."""
nodes = 0
for proc in process_list:
nodes = nodes + self.num_nodes(proc.child_list)
return nodes + len(process_list)
def get_start_time(self, process_subtree):
"""Returns the start time of the process subtree. This is the start
time of the earliest process.
"""
if not process_subtree:
return 100000000
return min( [min(proc.start_time, self.get_start_time(proc.child_list)) for proc in process_subtree] )
def get_end_time(self, process_subtree):
"""Returns the end time of the process subtree. This is the end time
of the last collected sample.
"""
if not process_subtree:
return -100000000
return max( [max(proc.start_time + proc.duration, self.get_end_time(proc.child_list)) for proc in process_subtree] )
def get_max_pid(self, process_subtree):
"""Returns the max PID found in the process tree."""
if not process_subtree:
return -100000000
return max( [max(proc.pid, self.get_max_pid(proc.child_list)) for proc in process_subtree] )
def update_ppids_for_daemons(self, process_list):
"""Fedora hack: when loading the system services from rc, runuser(1)
is used. This sets the PPID of all daemons to 1, skewing
the process tree. Try to detect this and set the PPID of
these processes the PID of rc.
"""
rcstartpid = -1
rcendpid = -1
rcproc = None
for p in process_list:
if p.cmd == "rc" and p.ppid // 1000 == 1:
rcproc = p
rcstartpid = p.pid
rcendpid = self.get_max_pid(p.child_list)
if rcstartpid != -1 and rcendpid != -1:
for p in process_list:
if p.pid > rcstartpid and p.pid < rcendpid and p.ppid // 1000 == 1:
p.ppid = rcstartpid
p.parent = rcproc
for p in process_list:
p.child_list = []
self.build()
def prune(self, process_subtree, parent):
"""Prunes the process tree by removing idle processes and processes
that only live for the duration of a single top sample. Sibling
processes with the same command line (i.e. threads) are merged
together. This filters out sleepy background processes, short-lived
processes and bootcharts' analysis tools.
"""
def is_idle_background_process_without_children(p):
process_end = p.start_time + p.duration
return not p.active and \
process_end >= self.start_time + self.duration and \
p.start_time > self.start_time and \
p.duration > 0.9 * self.duration and \
self.num_nodes(p.child_list) == 0
num_removed = 0
idx = 0
while idx < len(process_subtree):
p = process_subtree[idx]
if parent != None or len(p.child_list) == 0:
prune = False
if is_idle_background_process_without_children(p):
prune = True
elif p.duration <= 2 * self.sample_period:
# short-lived process
prune = True
if prune:
process_subtree.pop(idx)
for c in p.child_list:
process_subtree.insert(idx, c)
num_removed += 1
continue
else:
num_removed += self.prune(p.child_list, p)
else:
num_removed += self.prune(p.child_list, p)
idx += 1
return num_removed
def merge_logger(self, process_subtree, logger_proc, monitored_app, app_tree):
"""Merges the logger's process subtree. The logger will typically
spawn lots of sleep and cat processes, thus polluting the
process tree.
"""
num_removed = 0
for p in process_subtree:
is_app_tree = app_tree
if logger_proc == p.cmd and not app_tree:
is_app_tree = True
num_removed += self.merge_logger(p.child_list, logger_proc, monitored_app, is_app_tree)
# don't remove the logger itself
continue
if app_tree and monitored_app != None and monitored_app == p.cmd:
is_app_tree = False
if is_app_tree:
for child in p.child_list:
self.merge_processes(p, child)
num_removed += 1
p.child_list = []
else:
num_removed += self.merge_logger(p.child_list, logger_proc, monitored_app, is_app_tree)
return num_removed
def merge_exploders(self, process_subtree, processes):
"""Merges specific process subtrees (used for processes which usually
spawn huge meaningless process trees).
"""
num_removed = 0
for p in process_subtree:
            if p.cmd in processes and len(p.child_list) > 0:
subtreemap = self.getProcessMap(p.child_list)
for child in subtreemap.values():
self.merge_processes(p, child)
num_removed += len(subtreemap)
p.child_list = []
p.cmd += " (+)"
else:
num_removed += self.merge_exploders(p.child_list, processes)
return num_removed
def merge_siblings(self, process_subtree):
"""Merges thread processes. Sibling processes with the same command
line are merged together.
"""
num_removed = 0
idx = 0
while idx < len(process_subtree)-1:
p = process_subtree[idx]
nextp = process_subtree[idx+1]
if nextp.cmd == p.cmd:
process_subtree.pop(idx+1)
idx -= 1
num_removed += 1
p.child_list.extend(nextp.child_list)
self.merge_processes(p, nextp)
num_removed += self.merge_siblings(p.child_list)
idx += 1
if len(process_subtree) > 0:
p = process_subtree[-1]
num_removed += self.merge_siblings(p.child_list)
return num_removed
def merge_runs(self, process_subtree):
"""Merges process runs. Single child processes which share the same
command line with the parent are merged.
"""
num_removed = 0
idx = 0
while idx < len(process_subtree):
p = process_subtree[idx]
if len(p.child_list) == 1 and p.child_list[0].cmd == p.cmd:
child = p.child_list[0]
p.child_list = list(child.child_list)
self.merge_processes(p, child)
num_removed += 1
continue
num_removed += self.merge_runs(p.child_list)
idx += 1
return num_removed
def merge_processes(self, p1, p2):
"""Merges two process' samples."""
p1.samples.extend(p2.samples)
p1.samples.sort( key = lambda p: p.time )
p1time = p1.start_time
p2time = p2.start_time
p1.start_time = min(p1time, p2time)
pendtime = max(p1time + p1.duration, p2time + p2.duration)
p1.duration = pendtime - p1.start_time
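# The cleanup passes above are typically chained in this order (a minimal
# sketch; `tree` is a ProcessTree and 'bootchartd' is the logger process name,
# mirroring the sequence exercised by process_tree_test.py further below):
#   tree.merge_logger(tree.process_tree, 'bootchartd', None, False)
#   tree.prune(tree.process_tree, None)
#   tree.merge_exploders(tree.process_tree, set(['hwup']))
#   tree.merge_siblings(tree.process_tree)
#   tree.merge_runs(tree.process_tree)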
########NEW FILE########
__FILENAME__ = samples
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
class DiskStatSample:
def __init__(self, time):
self.time = time
self.diskdata = [0, 0, 0]
def add_diskdata(self, new_diskdata):
self.diskdata = [ a + b for a, b in zip(self.diskdata, new_diskdata) ]
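    # Minimal usage sketch (values are illustrative): per-disk readings taken
    # at the same timestamp accumulate element-wise, e.g.
    #   s = DiskStatSample(1.5); s.add_diskdata([10, 5, 2]); s.add_diskdata([1, 1, 0])
    # leaves s.diskdata == [11, 6, 2].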
class CPUSample:
def __init__(self, time, user, sys, io = 0.0, swap = 0.0):
self.time = time
self.user = user
self.sys = sys
self.io = io
self.swap = swap
@property
def cpu(self):
return self.user + self.sys
def __str__(self):
return str(self.time) + "\t" + str(self.user) + "\t" + \
str(self.sys) + "\t" + str(self.io) + "\t" + str (self.swap)
class MemSample:
used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
def __init__(self, time):
self.time = time
self.records = {}
def add_value(self, name, value):
if name in MemSample.used_values:
self.records[name] = value
def valid(self):
keys = self.records.keys()
# discard incomplete samples
return [v for v in MemSample.used_values if v not in keys] == []
class ProcessSample:
def __init__(self, time, state, cpu_sample):
self.time = time
self.state = state
self.cpu_sample = cpu_sample
def __str__(self):
return str(self.time) + "\t" + str(self.state) + "\t" + str(self.cpu_sample)
class ProcessStats:
def __init__(self, writer, process_map, sample_count, sample_period, start_time, end_time):
self.process_map = process_map
self.sample_count = sample_count
self.sample_period = sample_period
self.start_time = start_time
self.end_time = end_time
writer.info ("%d samples, avg. sample length %f" % (self.sample_count, self.sample_period))
writer.info ("process list size: %d" % len (self.process_map.values()))
class Process:
def __init__(self, writer, pid, cmd, ppid, start_time):
self.writer = writer
self.pid = pid
self.cmd = cmd
self.exe = cmd
self.args = []
self.ppid = ppid
self.start_time = start_time
self.duration = 0
self.samples = []
self.parent = None
self.child_list = []
self.active = None
self.last_user_cpu_time = None
self.last_sys_cpu_time = None
self.last_cpu_ns = 0
self.last_blkio_delay_ns = 0
self.last_swapin_delay_ns = 0
# split this process' run - triggered by a name change
def split(self, writer, pid, cmd, ppid, start_time):
split = Process (writer, pid, cmd, ppid, start_time)
split.last_cpu_ns = self.last_cpu_ns
split.last_blkio_delay_ns = self.last_blkio_delay_ns
split.last_swapin_delay_ns = self.last_swapin_delay_ns
return split
def __str__(self):
return " ".join([str(self.pid), self.cmd, str(self.ppid), '[ ' + str(len(self.samples)) + ' samples ]' ])
def calc_stats(self, samplePeriod):
if self.samples:
firstSample = self.samples[0]
lastSample = self.samples[-1]
self.start_time = min(firstSample.time, self.start_time)
self.duration = lastSample.time - self.start_time + samplePeriod
activeCount = sum( [1 for sample in self.samples if sample.cpu_sample and sample.cpu_sample.sys + sample.cpu_sample.user + sample.cpu_sample.io > 0.0] )
activeCount = activeCount + sum( [1 for sample in self.samples if sample.state == 'D'] )
self.active = (activeCount>2)
def calc_load(self, userCpu, sysCpu, interval):
userCpuLoad = float(userCpu - self.last_user_cpu_time) / interval
sysCpuLoad = float(sysCpu - self.last_sys_cpu_time) / interval
cpuLoad = userCpuLoad + sysCpuLoad
# normalize
if cpuLoad > 1.0:
userCpuLoad = userCpuLoad / cpuLoad
sysCpuLoad = sysCpuLoad / cpuLoad
return (userCpuLoad, sysCpuLoad)
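    # Normalization example (hypothetical tick deltas, interval = 100):
    # user delta 80 and sys delta 40 give raw loads 0.8 and 0.4 (sum 1.2 > 1.0),
    # which are rescaled to roughly (0.67, 0.33) so user + sys never exceeds 1.0.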
def set_parent(self, processMap):
if self.ppid != None:
self.parent = processMap.get (self.ppid)
if self.parent == None and self.pid // 1000 > 1 and \
not (self.ppid == 2000 or self.pid == 2000): # kernel threads: ppid=2
self.writer.warn("Missing CONFIG_PROC_EVENTS: no parent for pid '%i' ('%s') with ppid '%i'" \
% (self.pid,self.cmd,self.ppid))
def get_end_time(self):
return self.start_time + self.duration
class DiskSample:
def __init__(self, time, read, write, util):
self.time = time
self.read = read
self.write = write
self.util = util
self.tput = read + write
def __str__(self):
return "\t".join([str(self.time), str(self.read), str(self.write), str(self.util)])
########NEW FILE########
__FILENAME__ = parser_test
import sys, os, math
import unittest
sys.path.insert(0, os.getcwd())
import pybootchartgui.parsing as parsing
import pybootchartgui.main as main
debug = False
def floatEq(f1, f2):
return math.fabs(f1-f2) < 0.00001
bootchart_dir = os.path.join(os.path.dirname(sys.argv[0]), '../../examples/1/')
parser = main._mk_options_parser()
options, args = parser.parse_args(['--q', bootchart_dir])
writer = main._mk_writer(options)
class TestBCParser(unittest.TestCase):
def setUp(self):
self.name = "My first unittest"
self.rootdir = bootchart_dir
def mk_fname(self,f):
return os.path.join(self.rootdir, f)
def testParseHeader(self):
trace = parsing.Trace(writer, args, options)
state = parsing.parse_file(writer, trace, self.mk_fname('header'))
self.assertEqual(6, len(state.headers))
self.assertEqual(2, parsing.get_num_cpus(state.headers))
def test_parseTimedBlocks(self):
trace = parsing.Trace(writer, args, options)
state = parsing.parse_file(writer, trace, self.mk_fname('proc_diskstats.log'))
self.assertEqual(141, len(state.disk_stats))
def testParseProcPsLog(self):
trace = parsing.Trace(writer, args, options)
state = parsing.parse_file(writer, trace, self.mk_fname('proc_ps.log'))
samples = state.ps_stats
processes = samples.process_map
sorted_processes = [processes[k] for k in sorted(processes.keys())]
ps_data = open(self.mk_fname('extract2.proc_ps.log'))
for index, line in enumerate(ps_data):
            tokens = line.split()
            process = sorted_processes[index]
            if debug:
                print(tokens[0:4])
                print(process.pid // 1000, process.cmd, process.ppid // 1000, len(process.samples))
print('-------------------')
self.assertEqual(tokens[0], str(process.pid // 1000))
self.assertEqual(tokens[1], str(process.cmd))
self.assertEqual(tokens[2], str(process.ppid // 1000))
self.assertEqual(tokens[3], str(len(process.samples)))
ps_data.close()
def testparseProcDiskStatLog(self):
trace = parsing.Trace(writer, args, options)
state_with_headers = parsing.parse_file(writer, trace, self.mk_fname('header'))
state_with_headers.headers['system.cpu'] = 'xxx (2)'
samples = parsing.parse_file(writer, state_with_headers, self.mk_fname('proc_diskstats.log')).disk_stats
self.assertEqual(141, len(samples))
diskstats_data = open(self.mk_fname('extract.proc_diskstats.log'))
for index, line in enumerate(diskstats_data):
tokens = line.split('\t')
sample = samples[index]
if debug:
print(line.rstrip())
print(sample)
print('-------------------')
self.assertEqual(tokens[0], str(sample.time))
self.assert_(floatEq(float(tokens[1]), sample.read))
self.assert_(floatEq(float(tokens[2]), sample.write))
self.assert_(floatEq(float(tokens[3]), sample.util))
diskstats_data.close()
def testparseProcStatLog(self):
trace = parsing.Trace(writer, args, options)
samples = parsing.parse_file(writer, trace, self.mk_fname('proc_stat.log')).cpu_stats
self.assertEqual(141, len(samples))
stat_data = open(self.mk_fname('extract.proc_stat.log'))
for index, line in enumerate(stat_data):
tokens = line.split('\t')
sample = samples[index]
if debug:
print(line.rstrip())
print(sample)
print('-------------------')
self.assert_(floatEq(float(tokens[0]), sample.time))
self.assert_(floatEq(float(tokens[1]), sample.user))
self.assert_(floatEq(float(tokens[2]), sample.sys))
self.assert_(floatEq(float(tokens[3]), sample.io))
stat_data.close()
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = process_tree_test
import sys
import os
import unittest
sys.path.insert(0, os.getcwd())
import pybootchartgui.parsing as parsing
import pybootchartgui.process_tree as process_tree
import pybootchartgui.main as main
if sys.version_info >= (3, 0):
long = int
class TestProcessTree(unittest.TestCase):
def setUp(self):
self.name = "Process tree unittest"
self.rootdir = os.path.join(os.path.dirname(sys.argv[0]), '../../examples/1/')
parser = main._mk_options_parser()
options, args = parser.parse_args(['--q', self.rootdir])
writer = main._mk_writer(options)
trace = parsing.Trace(writer, args, options)
parsing.parse_file(writer, trace, self.mk_fname('proc_ps.log'))
trace.compile(writer)
self.processtree = process_tree.ProcessTree(writer, None, trace.ps_stats, \
trace.ps_stats.sample_period, None, options.prune, None, None, False, for_testing = True)
def mk_fname(self,f):
return os.path.join(self.rootdir, f)
def flatten(self, process_tree):
flattened = []
for p in process_tree:
flattened.append(p)
flattened.extend(self.flatten(p.child_list))
return flattened
def checkAgainstJavaExtract(self, filename, process_tree):
test_data = open(filename)
for expected, actual in zip(test_data, self.flatten(process_tree)):
tokens = expected.split('\t')
self.assertEqual(int(tokens[0]), actual.pid // 1000)
self.assertEqual(tokens[1], actual.cmd)
self.assertEqual(long(tokens[2]), 10 * actual.start_time)
self.assert_(long(tokens[3]) - 10 * actual.duration < 5, "duration")
self.assertEqual(int(tokens[4]), len(actual.child_list))
self.assertEqual(int(tokens[5]), len(actual.samples))
test_data.close()
def testBuild(self):
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.1.log'), process_tree)
def testMergeLogger(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.2.log'), process_tree)
def testPrune(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
self.processtree.prune(self.processtree.process_tree, None)
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3b.log'), process_tree)
def testMergeExploders(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
self.processtree.prune(self.processtree.process_tree, None)
self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3c.log'), process_tree)
def testMergeSiblings(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
self.processtree.prune(self.processtree.process_tree, None)
self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
self.processtree.merge_siblings(self.processtree.process_tree)
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3d.log'), process_tree)
def testMergeRuns(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
self.processtree.prune(self.processtree.process_tree, None)
self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
self.processtree.merge_siblings(self.processtree.process_tree)
self.processtree.merge_runs(self.processtree.process_tree)
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3e.log'), process_tree)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = pybootchartgui
#!/usr/bin/python
#
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import sys
from pybootchartgui.main import main
if __name__ == '__main__':
sys.exit(main())
########NEW FILE########
__FILENAME__ = relocate_sdk
#!/usr/bin/env python
#
# Copyright (c) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# DESCRIPTION
# This script is called by the SDK installer script. It replaces the dynamic
# loader path in all binaries and also fixes the SYSDIR paths/lengths and the
# location of ld.so.cache in the dynamic loader binary
#
# AUTHORS
# Laurentiu Palcu <laurentiu.palcu@intel.com>
#
import struct
import sys
import stat
import os
import re
import errno
if sys.version < '3':
def b(x):
return x
else:
def b(x):
return x.encode(sys.getfilesystemencoding())
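# b() keeps the byte-level ELF patching below portable: on Python 2 str literals
# already are bytes, while on Python 3 they must be encoded before being compared
# with (or written over) the raw bytes read from the binary, e.g. b("/lib/").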
old_prefix = re.compile(b("##DEFAULT_INSTALL_DIR##"))
def get_arch():
f.seek(0)
    e_ident = f.read(16)
    ei_mag0, ei_mag1_3, ei_class = struct.unpack("<B3sB11x", e_ident)
    # not an ELF image: bad magic (0x7f 'ELF') or an invalid EI_CLASS
    if ei_mag0 != 0x7f or ei_mag1_3 != b("ELF") or ei_class == 0:
return 0
if ei_class == 1:
return 32
elif ei_class == 2:
return 64
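# Example: a 64-bit ELF starts with the bytes 7f 45 4c 46 02 ("\x7fELF" followed
# by EI_CLASS == 2), so get_arch() returns 64 and the 64-bit struct formats are used.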
def parse_elf_header():
global e_type, e_machine, e_version, e_entry, e_phoff, e_shoff, e_flags,\
e_ehsize, e_phentsize, e_phnum, e_shentsize, e_shnum, e_shstrndx
f.seek(0)
elf_header = f.read(64)
if arch == 32:
# 32bit
hdr_fmt = "<HHILLLIHHHHHH"
hdr_size = 52
else:
# 64bit
hdr_fmt = "<HHIQQQIHHHHHH"
hdr_size = 64
e_type, e_machine, e_version, e_entry, e_phoff, e_shoff, e_flags,\
e_ehsize, e_phentsize, e_phnum, e_shentsize, e_shnum, e_shstrndx =\
struct.unpack(hdr_fmt, elf_header[16:hdr_size])
def change_interpreter(elf_file_name):
if arch == 32:
ph_fmt = "<IIIIIIII"
else:
ph_fmt = "<IIQQQQQQ"
""" look for PT_INTERP section """
for i in range(0,e_phnum):
f.seek(e_phoff + i * e_phentsize)
ph_hdr = f.read(e_phentsize)
if arch == 32:
# 32bit
p_type, p_offset, p_vaddr, p_paddr, p_filesz,\
p_memsz, p_flags, p_align = struct.unpack(ph_fmt, ph_hdr)
else:
# 64bit
p_type, p_flags, p_offset, p_vaddr, p_paddr, \
p_filesz, p_memsz, p_align = struct.unpack(ph_fmt, ph_hdr)
""" change interpreter """
if p_type == 3:
# PT_INTERP section
f.seek(p_offset)
# External SDKs with mixed pre-compiled binaries should not get
# relocated so look for some variant of /lib
fname = f.read(11)
            if fname.startswith(b("/lib/")) or fname.startswith(b("/lib64/")) or \
               fname.startswith(b("/lib32/")) or fname.startswith(b("/usr/lib32/")) or \
               fname.startswith(b("/usr/lib/")) or fname.startswith(b("/usr/lib64/")):
break
            if (len(new_dl_path) >= p_filesz):
                print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
                    % (elf_file_name, p_filesz, len(new_dl_path) + 1))
break
dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path))
f.seek(p_offset)
f.write(dl_path)
break
def change_dl_sysdirs():
if arch == 32:
sh_fmt = "<IIIIIIIIII"
else:
sh_fmt = "<IIQQQQIIQQ"
""" read section string table """
f.seek(e_shoff + e_shstrndx * e_shentsize)
sh_hdr = f.read(e_shentsize)
if arch == 32:
sh_offset, sh_size = struct.unpack("<16xII16x", sh_hdr)
else:
sh_offset, sh_size = struct.unpack("<24xQQ24x", sh_hdr)
f.seek(sh_offset)
sh_strtab = f.read(sh_size)
    sysdirs = sysdirslen = ""
""" change ld.so.cache path and default libs path for dynamic loader """
for i in range(0,e_shnum):
f.seek(e_shoff + i * e_shentsize)
sh_hdr = f.read(e_shentsize)
sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size, sh_link,\
sh_info, sh_addralign, sh_entsize = struct.unpack(sh_fmt, sh_hdr)
name = sh_strtab[sh_name:sh_strtab.find(b("\0"), sh_name)]
""" look only into SHT_PROGBITS sections """
if sh_type == 1:
f.seek(sh_offset)
""" default library paths cannot be changed on the fly because """
""" the string lengths have to be changed too. """
if name == b(".sysdirs"):
sysdirs = f.read(sh_size)
sysdirs_off = sh_offset
sysdirs_sect_size = sh_size
elif name == b(".sysdirslen"):
sysdirslen = f.read(sh_size)
sysdirslen_off = sh_offset
elif name == b(".ldsocache"):
ldsocache_path = f.read(sh_size)
new_ldsocache_path = old_prefix.sub(new_prefix, ldsocache_path)
# pad with zeros
new_ldsocache_path += b("\0") * (sh_size - len(new_ldsocache_path))
# write it back
f.seek(sh_offset)
f.write(new_ldsocache_path)
if sysdirs != "" and sysdirslen != "":
paths = sysdirs.split(b("\0"))
sysdirs = b("")
sysdirslen = b("")
for path in paths:
""" exit the loop when we encounter first empty string """
if path == b(""):
break
new_path = old_prefix.sub(new_prefix, path)
sysdirs += new_path + b("\0")
if arch == 32:
sysdirslen += struct.pack("<L", len(new_path))
else:
sysdirslen += struct.pack("<Q", len(new_path))
""" pad with zeros """
sysdirs += b("\0") * (sysdirs_sect_size - len(sysdirs))
""" write the sections back """
f.seek(sysdirs_off)
f.write(sysdirs)
f.seek(sysdirslen_off)
f.write(sysdirslen)
# MAIN
if len(sys.argv) < 4:
sys.exit(-1)
# In Python 3, strings may also contain Unicode characters. So, convert
# them to bytes
if sys.version_info < (3,):
new_prefix = sys.argv[1]
new_dl_path = sys.argv[2]
else:
new_prefix = sys.argv[1].encode()
new_dl_path = sys.argv[2].encode()
executables_list = sys.argv[3:]
for e in executables_list:
perms = os.stat(e)[stat.ST_MODE]
if os.access(e, os.W_OK|os.R_OK):
perms = None
else:
os.chmod(e, perms|stat.S_IRWXU)
try:
f = open(e, "r+b")
except IOError:
exctype, ioex = sys.exc_info()[:2]
if ioex.errno == errno.ETXTBSY:
print("Could not open %s. File used by another process.\nPlease "\
"make sure you exit all processes that might use any SDK "\
"binaries." % e)
else:
print("Could not open %s: %s(%d)" % (e, ioex.strerror, ioex.errno))
sys.exit(-1)
# Save old size and do a size check at the end. Just a safety measure.
old_size = os.path.getsize(e)
arch = get_arch()
if arch:
parse_elf_header()
change_interpreter(e)
change_dl_sysdirs()
""" change permissions back """
if perms:
os.chmod(e, perms)
f.close()
if old_size != os.path.getsize(e):
print("New file size for %s is different. Looks like a relocation error!", e)
sys.exit(-1)
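# Typical invocation by the SDK installer (all paths here are illustrative only):
#   relocate_sdk.py /opt/sdk /opt/sdk/sysroots/x86_64-linux/lib/ld-linux-x86-64.so.2 bin1 bin2
# argv[1] = new install prefix, argv[2] = new dynamic loader path,
# argv[3:] = the executables to patch in place.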
########NEW FILE########
__FILENAME__ = sysroot-relativelinks
#!/usr/bin/env python
import sys
import os
# Take a sysroot directory and turn all the absolute symlinks into relative
# ones such that the sysroot is usable within another system.
if len(sys.argv) != 2:
print("Usage is " + sys.argv[0] + "<directory>")
sys.exit(1)
topdir = sys.argv[1]
topdir = os.path.abspath(topdir)
def handlelink(filep, subdir):
link = os.readlink(filep)
if link[0] != "/":
return
if link.startswith(topdir):
return
#print("Replacing %s with %s for %s" % (link, topdir+link, filep))
print("Replacing %s with %s for %s" % (link, os.path.relpath(topdir+link, subdir), filep))
os.unlink(filep)
os.symlink(os.path.relpath(topdir+link, subdir), filep)
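# Example (hypothetical): with topdir=/sdk/sysroot, the symlink
# /sdk/sysroot/usr/bin/app -> /usr/lib/libfoo.so is rewritten to the relative
# target ../lib/libfoo.so via os.path.relpath(topdir + link, subdir).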
for subdir, dirs, files in os.walk(topdir):
for f in files:
filep = os.path.join(subdir, f)
if os.path.islink(filep):
#print("Considering %s" % filep)
handlelink(filep, subdir)
########NEW FILE########
__FILENAME__ = dirsize
#!/usr/bin/env python
#
# Copyright (c) 2011, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# Display details of the root filesystem size, broken up by directory.
# Allows for limiting by size to focus on the larger files.
#
# Author: Darren Hart <dvhart@linux.intel.com>
#
import os
import sys
import stat
class Record:
def create(path):
r = Record(path)
s = os.lstat(path)
if stat.S_ISDIR(s.st_mode):
for p in os.listdir(path):
pathname = path + "/" + p
ss = os.lstat(pathname)
if not stat.S_ISLNK(ss.st_mode):
r.records.append(Record.create(pathname))
r.size += r.records[-1].size
r.records.sort(reverse=True)
else:
r.size = os.lstat(path).st_size
return r
create = staticmethod(create)
def __init__(self, path):
self.path = path
self.size = 0
self.records = []
def __cmp__(this, that):
if that is None:
return 1
if not isinstance(that, Record):
raise TypeError
if len(this.records) > 0 and len(that.records) == 0:
return -1
if len(this.records) == 0 and len(that.records) > 0:
return 1
if this.size < that.size:
return -1
if this.size > that.size:
return 1
return 0
def show(self, minsize):
total = 0
if self.size <= minsize:
return 0
print "%10d %s" % (self.size, self.path)
for r in self.records:
total += r.show(minsize)
if len(self.records) == 0:
total = self.size
return total
def main():
minsize = 0
if len(sys.argv) == 2:
minsize = int(sys.argv[1])
rootfs = Record.create(".")
total = rootfs.show(minsize)
print "Displayed %d/%d bytes (%.2f%%)" % \
(total, rootfs.size, 100 * float(total) / rootfs.size)
if __name__ == "__main__":
main()
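# Sample session (numbers are illustrative): run from inside a root filesystem,
#   $ cd rootfs && dirsize.py 102400
# prints every file or directory larger than 100 KiB, followed by a
# "Displayed x/y bytes (z%)" summary line.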
########NEW FILE########
__FILENAME__ = ksize
#!/usr/bin/env python
#
# Copyright (c) 2011, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# Display details of the kernel build size, broken up by built-in.o. Sort
# the objects by size. Run from the top level kernel build directory.
#
# Author: Darren Hart <dvhart@linux.intel.com>
#
import sys
import getopt
import os
from subprocess import *
from string import join
def usage():
prog = os.path.basename(sys.argv[0])
print 'Usage: %s [OPTION]...' % (prog)
print ' -d, display an additional level of drivers detail'
print ' -h, --help display this help and exit'
print ''
print 'Run %s from the top-level Linux kernel build directory.' % (prog)
class Sizes:
def __init__(self, glob):
self.title = glob
p = Popen("size -t " + glob, shell=True, stdout=PIPE, stderr=PIPE)
output = p.communicate()[0].splitlines()
if len(output) > 2:
sizes = output[-1].split()[0:4]
self.text = int(sizes[0])
self.data = int(sizes[1])
self.bss = int(sizes[2])
self.total = int(sizes[3])
else:
self.text = self.data = self.bss = self.total = 0
def show(self, indent=""):
print "%-32s %10d | %10d %10d %10d" % \
(indent+self.title, self.total, self.text, self.data, self.bss)
class Report:
def create(filename, title, subglob=None):
r = Report(filename, title)
path = os.path.dirname(filename)
p = Popen("ls " + path + "/*.o | grep -v built-in.o",
shell=True, stdout=PIPE, stderr=PIPE)
glob = join(p.communicate()[0].splitlines())
oreport = Report(glob, path + "/*.o")
oreport.sizes.title = path + "/*.o"
r.parts.append(oreport)
if subglob:
p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE)
for f in p.communicate()[0].splitlines():
path = os.path.dirname(f)
r.parts.append(Report.create(f, path, path + "/*/built-in.o"))
r.parts.sort(reverse=True)
for b in r.parts:
r.totals["total"] += b.sizes.total
r.totals["text"] += b.sizes.text
r.totals["data"] += b.sizes.data
r.totals["bss"] += b.sizes.bss
r.deltas["total"] = r.sizes.total - r.totals["total"]
r.deltas["text"] = r.sizes.text - r.totals["text"]
r.deltas["data"] = r.sizes.data - r.totals["data"]
r.deltas["bss"] = r.sizes.bss - r.totals["bss"]
return r
create = staticmethod(create)
def __init__(self, glob, title):
self.glob = glob
self.title = title
self.sizes = Sizes(glob)
self.parts = []
self.totals = {"total":0, "text":0, "data":0, "bss":0}
self.deltas = {"total":0, "text":0, "data":0, "bss":0}
def show(self, indent=""):
rule = str.ljust(indent, 80, '-')
print "%-32s %10s | %10s %10s %10s" % \
(indent+self.title, "total", "text", "data", "bss")
print rule
self.sizes.show(indent)
print rule
for p in self.parts:
if p.sizes.total > 0:
p.sizes.show(indent)
print rule
print "%-32s %10d | %10d %10d %10d" % \
(indent+"sum", self.totals["total"], self.totals["text"],
self.totals["data"], self.totals["bss"])
print "%-32s %10d | %10d %10d %10d" % \
(indent+"delta", self.deltas["total"], self.deltas["text"],
self.deltas["data"], self.deltas["bss"])
print "\n"
def __cmp__(this, that):
if that is None:
return 1
if not isinstance(that, Report):
raise TypeError
if this.sizes.total < that.sizes.total:
return -1
if this.sizes.total > that.sizes.total:
return 1
return 0
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "dh", ["help"])
except getopt.GetoptError, err:
print '%s' % str(err)
usage()
sys.exit(2)
driver_detail = False
for o, a in opts:
if o == '-d':
driver_detail = True
elif o in ('-h', '--help'):
usage()
sys.exit(0)
else:
assert False, "unhandled option"
glob = "arch/*/built-in.o */built-in.o"
vmlinux = Report.create("vmlinux", "Linux Kernel", glob)
vmlinux.show()
for b in vmlinux.parts:
if b.totals["total"] > 0 and len(b.parts) > 1:
b.show()
if b.title == "drivers" and driver_detail:
for d in b.parts:
if d.totals["total"] > 0 and len(d.parts) > 1:
d.show(" ")
if __name__ == "__main__":
main()
########NEW FILE########
| UTF-8 | Python | false | false | 1,724,689 | py | 16,703 | allPythonContent.py | 3,862 | 0.568044 | 0.560071 | 0.000032 | 46,342 | 35.216542 | 335 |
Weevils/India_Climate | 4,990,752,020,632 | 97a7b3ffda3d790323ab200f4040e93a1d933e6e | 0c22e50b81bc59bc3bed5eb0d3339964d50f2b47 | /Scripts/DataAnalysis_Yield_Stability_Quick.py | 4361529e4747281dc4a199f9c68e0e4c7cf9cd0b | []
| no_license | https://github.com/Weevils/India_Climate | 698941d36b38a244887e1453dd9901d800462ac3 | a5fb70b88894211f2db36bcf4cbb700aee9e0821 | refs/heads/master | 2019-08-20T20:48:07.954099 | 2016-09-23T22:57:43 | 2016-09-23T22:57:52 | 58,396,671 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'Vinny_Ricciardi'
#Goal: merge and work with yield, climate (tmp, pre), and farm size data
# Todo: expand to tmp(erature), for now just pre(cipitation)
import os
import sys
import glob
import time
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.options.display.float_format = '{:.2f}'.format
sns.set(color_codes=True)
plt.style.use('ggplot')
from mappy.utilities.plot_map import plot_map
startTime = datetime.now()
print(startTime)
# Step 1: Prep data
# Read in and merge data: precipitation, farm size, yields
TEMP = '/Users/Vinny_Ricciardi/Downloads/test.csv'
PATH_VDSA = '/Users/Vinny_Ricciardi/Documents/Data_Library/Survey/India/India_VDSA/' \
'Country_Wide_merged_timescales/temporal/'
PATH_CRU = '/Users/Vinny_Ricciardi/Documents/Data_Library/Survey/India/India_VDSA/' \
'CRU_Climate_VDSA_DIST/CRU_Cleaned_VDSA/'
base_df = pd.read_csv(PATH_VDSA + 'dt_operational_holdings_a_web.csv')
#clean farm size data
base_df = base_df.replace('-1', np.nan, regex=True)
base_df['YEAR'] = base_df['YEAR'].replace(1974, 1975)
base_df['YEAR'] = base_df['YEAR'].replace(1976, 1975)
base_df['YEAR'] = base_df['YEAR'].replace(1979, 1980)
base_df['YEAR'] = base_df['YEAR'].replace(1986, 1985)
base_df['YEAR'] = base_df['YEAR'].replace(2001, 2000)
# merge base with a dummy df that is not used directly, but enables interpolation b/c it contains all years needed
dummy = pd.read_csv(PATH_VDSA + 'dt_market_road_a_web.csv')
cv = ['DIST', 'YEAR']
base_df = pd.merge(base_df, dummy, how='outer', on=cv)
base_df = base_df.sort_values(['YEAR'])
district_list = [base_df.query('DIST == {}'.format(dist)).interpolate(method='linear') for dist in base_df.DIST.unique()]
base_df = pd.concat(district_list)
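# Note on the interpolation above (the DIST code below is illustrative): each
# district is isolated first so interpolate() never fills values across district
# boundaries, e.g. base_df.query('DIST == 101').interpolate(method='linear')
# only fills the gap years within district 101.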
# merge climate and yield data to newly interpolated farm size data
def merge_VDSA(base_df, df_list):
for value in df_list:
base_df = pd.merge(base_df, value, how='outer', on=cv)
return base_df
df_list = [
pd.read_csv(PATH_VDSA + 'dt_area_prod_a_web.csv'),
pd.read_csv(PATH_CRU + 'Cleaned_.pre_all_MEAN.csv')
]
df = merge_VDSA(base_df, df_list)
df = df.replace('-1', np.nan, regex=True)
df.columns
# Calculate percent farm size per category; note, TOTAL_NO is new because VDSA version didn't aways equal 100 percent
df['TOTAL_NO'] = df[['MARG_NO', 'SMALL_NO', 'SEMIM_NO', 'MEDIUM_NO', 'LARGE_NO']].sum(axis=1)
df['MARG_NP'] = df['MARG_NO']/df['TOTAL_NO']
df['SMALL_NP'] = df['SMALL_NO']/df['TOTAL_NO']
df['SEMIM_NP'] = df['SEMIM_NO']/df['TOTAL_NO']
df['MEDIUM_NP'] = df['MEDIUM_NO']/df['TOTAL_NO']
df['LARGE_NP'] = df['LARGE_NO']/df['TOTAL_NO']
df['TOTAL_NP_Test'] = df[['MARG_NP', 'SMALL_NP', 'SEMIM_NP', 'MEDIUM_NP', 'LARGE_NP']].sum(axis=1)
df['TOTAL_NP_Test'].describe()
df = df.query('TOTAL_NP_Test > 0')
# check out Farm_Size_EDA.py for percentage plots if needed # add to this .py for fixed interpolation
# save df to csv for panel analysis
df.rename(columns={'SCAN_TA': 'SGUR_TA'}, inplace=True)
df.to_csv('/Users/Vinny_Ricciardi/Documents/Data_Library/Survey/India/India_VDSA/VDSA_lm_ready/df_panel.csv')
############################################################################################################
# Step 2. Prepare df for analysis: cv pre and yield, median farm size percentages
# clean production variables
# clean data: presently missing from below list
# df.rename(columns={'SCAN_TA': 'SGUR_TA'}, inplace=True) #sugar
# df.rename(columns={'SORG_KA': 'SORG_K_TA'}, inplace=True) #karif sorghum production
# df.rename(columns={'SORG_KQ': 'SORG_K_TQ'}, inplace=True) #karif sorghum area
# df.rename(columns={'SORG_RA': 'SORG_R_TA'}, inplace=True) #rabi sorghum production
# df.rename(columns={'SORG_RQ': 'SORG_R_TQ'}, inplace=True) #rabi sorghum area
grouped = df.groupby('DIST')
# Calculate CV precipitation (sigma over mu) for annual, karif and rabi
# Todo: make generalizable for all months
# Todo: make generalizable for al climate var (esp tmp)
# annual
gr_annual = grouped['annual'].agg({'annual_avg': np.mean,
'annual_std': np.std})
gr_annual['annual_cov'] = gr_annual['annual_std']/gr_annual['annual_avg']
gr_annual.reset_index(inplace=True)
# rabi
gr_rab = grouped['rabi'].agg({'rabi_avg': np.mean,
'rabi_std': np.std})
gr_rab['rabi_cov'] = gr_rab['rabi_std']/gr_rab['rabi_avg']
gr_rab.reset_index(inplace=True)
# karif
gr_karif = grouped['karif'].agg({'karif_avg': np.mean,
'karif_std': np.std})
gr_karif['karif_cov'] = gr_karif['karif_std']/gr_karif['karif_avg']
gr_karif.reset_index(inplace=True)
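# CV (coefficient of variation) = std / mean, e.g. mean rainfall of 800mm with a
# std of 200mm gives CV = 0.25; a higher CV means less stable rainfall across years.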
#merge pre
gr_pre = pd.merge(gr_annual, gr_rab, how='outer', on='DIST')
gr_pre = pd.merge(gr_karif, gr_pre, how='outer', on='DIST')
# Calculate CV yield for rice, cotton, sugar
#Todo: generalize to all crops
df['RICE_YIELD'] = df['RICE_TQ']/df['RICE_TA']
gr_ri = grouped['RICE_YIELD'].agg({'RICE_avg': np.mean,
'RICE_std': np.std})
gr_ri['RICE_cov'] = gr_ri['RICE_std']/gr_ri['RICE_avg']
gr_ri.reset_index(inplace=True)
df['COTN_YIELD'] = df['COTN_TQ']/df['COTN_TA']
gr_ct = grouped['COTN_YIELD'].agg({'COTN_avg': np.mean,
'COTN_std': np.std})
gr_ct['COTN_cov'] = gr_ct['COTN_std']/gr_ct['COTN_avg']
gr_ct.reset_index(inplace=True)
df['SGUR_YIELD'] = df['SGUR_TQ']/df['SGUR_TA']
gr_sg = grouped['SGUR_YIELD'].agg({'SGUR_avg': np.mean,
'SGUR_std': np.std})
gr_sg['SGUR_cov'] = gr_sg['SGUR_std']/gr_sg['SGUR_avg']
gr_sg.reset_index(inplace=True)
gr_yd = pd.merge(gr_ri, gr_ct, how='outer', on='DIST')
gr_yd = pd.merge(gr_sg, gr_yd, how='outer', on='DIST')
# Calculate median farm size proportions over time
# Todo: check other stats
# Todo: check percent area too
gr_fm = df.loc[:,['DIST', 'LARGE_NP', 'MEDIUM_NP', 'SEMIM_NP', 'SMALL_NP', 'MARG_NP']]
gr_fm = gr_fm.groupby('DIST').median()
gr_fm.reset_index(inplace=True)
# Merge 3 groups back to common df
df = pd.merge(gr_pre, gr_yd, how='outer', on='DIST')
df = pd.merge(gr_fm, df, how='outer', on='DIST')
df.columns
df.head()
df.to_csv('/Users/Vinny_Ricciardi/Documents/Data_Library/Survey/India/India_VDSA/VDSA_lm_ready/AAG_lm_input.csv')
# Todo: keep for crop generalization:
# clean data: presently missing from below list
# df.rename(columns={'SCAN_TA': 'SGUR_TA'}, inplace=True) #sugar
# df.rename(columns={'SORG_KA': 'SORG_K_TA'}, inplace=True) #karif sorghum production
# df.rename(columns={'SORG_KQ': 'SORG_K_TQ'}, inplace=True) #karif sorghum area
# df.rename(columns={'SORG_RA': 'SORG_R_TA'}, inplace=True) #rabi sorghum production
# df.rename(columns={'SORG_RQ': 'SORG_R_TQ'}, inplace=True) #rabi sorghum area
# crops = ['BRLY', 'CAST', 'CERL', 'COTN', 'CPEA', 'FMLT', 'GNUT', 'LINS',
# 'MAIZ', 'OILS', 'PMLT', 'PPEA', 'PULS', 'RICE', 'RM', 'SAFF',
# 'SESA', 'SORG', 'SOYA', 'SUNF', 'WHT']
############################################################################################################
#Todo below is very messy!
# Plotting
# Correlation plot
# Compute the correlation matrix
# corr = df.corr()
#
# # Generate a mask for the upper triangle
# mask = np.zeros_like(corr, dtype=np.bool)
# mask[np.triu_indices_from(mask)] = True
#
# # Set up the matplotlib figure
# f, ax = plt.subplots(figsize=(11, 9))
#
# # Generate a custom diverging colormap
# cmap = sns.diverging_palette(220, 10, as_cmap=True)
#
# # yticks = df.index
# # keptticks = yticks[::int(len(yticks)/10)]
# # yticks = ['' for y in yticks]
# # yticks[::int(len(yticks)/10)] = keptticks
# #
# # xticks = df.columns
# # keptticks = xticks[::int(len(xticks)/10)]
# # xticks = ['' for y in xticks]
# # xticks[::int(len(xticks)/10)] = keptticks
#
#
# # Draw the heatmap with the mask and correct aspect ratio
# sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, annot=True,
# square=True, xticklabels=5, yticklabels=5,
# linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
#
# plt.yticks(rotation=0)
#
# plt.show()
#
#
#
# # Step 3: bivariate plots farm size yield
# fig1 = plt.figure(1, figsize=(25,10))
# # fig1.suptitle('%s' % x, fontsize=20)
#
# z = (4,5) # plot size dimensions
#
# # row 1: number of farmer distribution per size class
# ax1 = plt.subplot2grid(z, (0,0), rowspan=1, colspan=1)
# ax2 = plt.subplot2grid(z, (0,1), rowspan=1, colspan=1)
# ax3 = plt.subplot2grid(z, (0,2), rowspan=1, colspan=1)
# ax4 = plt.subplot2grid(z, (0,3), rowspan=1, colspan=1)
# ax5 = plt.subplot2grid(z, (0,4), rowspan=1, colspan=1)
#
# ax1.yaxis.grid(False)
# ax2.yaxis.grid(False)
# ax3.yaxis.grid(False)
# ax4.yaxis.grid(False)
# ax5.yaxis.grid(False)
#
# ax1.xaxis.grid(False)
# ax2.xaxis.grid(False)
# ax3.xaxis.grid(False)
# ax4.xaxis.grid(False)
# ax5.xaxis.grid(False)
#
# ax1.xaxis.set_ticklabels([])
# ax2.xaxis.set_ticklabels([])
# ax3.xaxis.set_ticklabels([])
# ax4.xaxis.set_ticklabels([])
# ax5.xaxis.set_ticklabels([])
#
# ax1.set_title('Marginal', fontsize=12)
# ax2.set_title('Small', fontsize=12)
# ax3.set_title('SemiMedium', fontsize=12)
# ax4.set_title('Medium', fontsize=12)
# ax5.set_title('Large', fontsize=12)
#
# sns.kdeplot(df.MARG_NP, ax=ax1, label='')
# sns.kdeplot(df.SMALL_NP, ax=ax2, label='')
# sns.kdeplot(df.SEMIM_NP, ax=ax3, label='')
# sns.kdeplot(df.MEDIUM_NP, ax=ax4, label='')
# sns.kdeplot(df.LARGE_NP, ax=ax5, label='')
#
#
# # Row 2: Bivariate farm size yield relationships
# ax12 = plt.subplot2grid(z, (1,0), rowspan=1, colspan=1)
# ax22 = plt.subplot2grid(z, (1,1), rowspan=1, colspan=1)
# ax32 = plt.subplot2grid(z, (1,2), rowspan=1, colspan=1)
# ax42 = plt.subplot2grid(z, (1,3), rowspan=1, colspan=1)
# ax52 = plt.subplot2grid(z, (1,4), rowspan=1, colspan=1)
#
# ax12.yaxis.grid(False)
#
# ax12.xaxis.set_ticklabels([])
# ax22.xaxis.set_ticklabels([])
# ax32.xaxis.set_ticklabels([])
# ax42.xaxis.set_ticklabels([])
# ax52.xaxis.set_ticklabels([])
#
# ax12.axes.get_xaxis().set_visible(False)
# ax22.axes.get_xaxis().set_visible(False)
# ax32.axes.get_xaxis().set_visible(False)
# ax42.axes.get_xaxis().set_visible(False)
# ax52.axes.get_xaxis().set_visible(False)
#
# ax22.axes.get_yaxis().set_visible(False)
# ax32.axes.get_yaxis().set_visible(False)
# ax42.axes.get_yaxis().set_visible(False)
# ax52.axes.get_yaxis().set_visible(False)
#
# sns.regplot(df['MARG_NP'], df['RICE_cov'], ax=ax12)
# sns.regplot(df['SMALL_NP'], df['RICE_cov'], ax=ax22)
# sns.regplot(df['SEMIM_NP'], df['RICE_cov'], ax=ax32)
# sns.regplot(df['MEDIUM_NP'], df['RICE_cov'], ax=ax42)
# sns.regplot(df['LARGE_NP'], df['RICE_cov'], ax=ax52)
#
# # Row 3: Bivariate farm size to climate relationships
# ax12 = plt.subplot2grid(z, (2,0), rowspan=1, colspan=1)
# ax22 = plt.subplot2grid(z, (2,1), rowspan=1, colspan=1)
# ax32 = plt.subplot2grid(z, (2,2), rowspan=1, colspan=1)
# ax42 = plt.subplot2grid(z, (2,3), rowspan=1, colspan=1)
# ax52 = plt.subplot2grid(z, (2,4), rowspan=1, colspan=1)
#
# ax12.yaxis.grid(False)
#
# ax12.xaxis.set_ticklabels([])
# ax22.xaxis.set_ticklabels([])
# ax32.xaxis.set_ticklabels([])
# ax42.xaxis.set_ticklabels([])
# ax52.xaxis.set_ticklabels([])
#
# ax12.axes.get_xaxis().set_visible(False)
# ax22.axes.get_xaxis().set_visible(False)
# ax32.axes.get_xaxis().set_visible(False)
# ax42.axes.get_xaxis().set_visible(False)
# ax52.axes.get_xaxis().set_visible(False)
#
# ax22.axes.get_yaxis().set_visible(False)
# ax32.axes.get_yaxis().set_visible(False)
# ax42.axes.get_yaxis().set_visible(False)
# ax52.axes.get_yaxis().set_visible(False)
#
# sns.regplot(df['MARG_NP'], df['annual_cov'], ax=ax12)
# sns.regplot(df['SMALL_NP'], df['annual_cov'], ax=ax22)
# sns.regplot(df['SEMIM_NP'], df['annual_cov'], ax=ax32)
# sns.regplot(df['MEDIUM_NP'], df['annual_cov'], ax=ax42)
# sns.regplot(df['LARGE_NP'], df['annual_cov'], ax=ax52)
#
#
# # Row 4: Bivariate climate to yield relationships
# ax14 = plt.subplot2grid(z, (3,0), rowspan=1, colspan=1)
# ax24 = plt.subplot2grid(z, (3,1), rowspan=1, colspan=1)
# ax34 = plt.subplot2grid(z, (3,2), rowspan=1, colspan=1)
#
# ax14.yaxis.grid(False)
#
# ax14.xaxis.set_ticklabels([])
# ax24.xaxis.set_ticklabels([])
# ax34.xaxis.set_ticklabels([])
#
# # ax14.axes.get_xaxis().set_visible(False)
# # ax24.axes.get_xaxis().set_visible(False)
# # ax34.axes.get_xaxis().set_visible(False)
#
# ax24.axes.get_yaxis().set_visible(False)
# ax34.axes.get_yaxis().set_visible(False)
#
# sns.regplot(df['annual_cov'], df['RICE_cov'], ax=ax14)
# sns.regplot(df['rabi_cov'], df['RICE_cov'], ax=ax24)
# sns.regplot(df['karif_cov'], df['RICE_cov'], ax=ax34)
# print datetime.now() - startTime
#
#
#
# #relationships
# #farm size and yield
# #yield and climate
# #farm size and climate
#
# fig1 = plt.figure(1, figsize=(20,3))
# # fig1.suptitle('%s' % x, fontsize=20)
#
# z = (1,5) # plot size dimensions
#
# ax1 = plt.subplot2grid(z, (0,0), rowspan=1, colspan=1)
# ax2 = plt.subplot2grid(z, (0,1), rowspan=1, colspan=1)
# ax3 = plt.subplot2grid(z, (0,2), rowspan=1, colspan=1)
# ax4 = plt.subplot2grid(z, (0,3), rowspan=1, colspan=1)
# ax5 = plt.subplot2grid(z, (0,4), rowspan=1, colspan=1)
#
# sns.regplot(df['MARG_NP'], df['RICE_cov'], ax=ax1)
# sns.regplot(df['SMALL_NP'], df['RICE_cov'], ax=ax2)
# sns.regplot(df['SEMIM_NP'], df['RICE_cov'], ax=ax3)
# sns.regplot(df['MEDIUM_NP'], df['RICE_cov'], ax=ax4)
# sns.regplot(df['LARGE_NP'], df['RICE_cov'], ax=ax5)
#
#
#mapping
#
# shp = '/Users/Vinny_Ricciardi/Documents/Data_Library/GIS_Library/India/India_Admin_shp/Dist_1970_VDSA/District_1970_Dict_proj.shp'
# PATH_1 = '/Users/Vinny_Ricciardi/Downloads/'
#
#
# m = df['RICE_cov'].max()
#
# # for i in x:
# plot_map(shp, df, unique_shp=['DIST'], unique_tbl=['DIST'], variable2map= ['RICE_cov'],
# min_value=0., max_value=m, fig_size=(2, 3), dpi=200, projection='lcc',
# out_figure= '/Users/Vinny_Ricciardi/Downloads/Map_cov_test.png')
#
# help(plot_map)
#
# print datetime.now() - startTime | UTF-8 | Python | false | false | 14,086 | py | 10 | DataAnalysis_Yield_Stability_Quick.py | 9 | 0.647096 | 0.617919 | 0 | 426 | 32.068075 | 132 |
yeldiRium/2048 | 11,587,821,788,200 | b79bf649a4fff85a2cbc7361546a9248f7d56b80 | c6895e2b1e81ad5ad7309c379c35a1e533b8570d | /exceptions.py | 1a2fc21b335099eca214b34374801889fcbfa1e2 | []
| no_license | https://github.com/yeldiRium/2048 | f9d8f85eab141f430df2116bc053633902b19713 | 8e12b3a4140d93c51a783914bfabdeb2682e7679 | refs/heads/master | 2021-01-21T12:59:08.329248 | 2016-04-09T23:06:44 | 2016-04-09T23:06:44 | 55,513,557 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class GameNotInitializedError(Exception):
pass
class GameLostError(Exception):
pass
class InvalidActionError(Exception):
pass
class NoEmptyContainerError(Exception):
pass
class MoveNotAllowedError(Exception):
pass
class FusionNotAllowedError(Exception):
pass
| UTF-8 | Python | false | false | 293 | py | 23 | exceptions.py | 22 | 0.761092 | 0.761092 | 0 | 22 | 12.318182 | 41 |
Mfhodges/CRF2 | 19,679,540,155,105 | 4e1823a344d604e9c1a5b20b22aa07722afaedcd | e3095c9f598ad7c59a059ca27554546c18906863 | /crf2/custom_auth.py | 1109e6c75b7286a5480d8c8f96c8eb841dea3f2c | []
| no_license | https://github.com/Mfhodges/CRF2 | 9d6c465fabad7c7e09b8324675a460b12e9f71fa | 636f17ed540456ec867c17f8078dd96ffcdbabc4 | refs/heads/master | 2022-02-27T23:06:45.884356 | 2020-10-12T18:31:43 | 2020-10-12T18:31:43 | 172,382,692 | 1 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth.middleware import RemoteUserMiddleware
from django.contrib.auth.models import User
from course.models import Profile
class CustomRemoteUserMiddleware(RemoteUserMiddleware):
def configure_user(request, user):
meta = request.META
try:
user.update(first_name=meta['givenName'],last_name=meta['sn'],email=meta['mail'])
Profile.objects.update_or_create(user=user,penn_id=meta['penn_id'])
except:
#fail silently
pass
| UTF-8 | Python | false | false | 514 | py | 123 | custom_auth.py | 56 | 0.684825 | 0.684825 | 0 | 14 | 35.714286 | 93 |
jack-mars/selemium_webdriver | 1,503,238,596,648 | 4b222385bcb82302332228ca379442a7a6b7b74b | e9e11aee109878d490a77eef74fb14aa1538f684 | /test37understandingmethods.py | 071bc8082bc8aa719d0b1d016ed76219766b76ec | []
| no_license | https://github.com/jack-mars/selemium_webdriver | c107990a1564ec038db6b7c52556fd7438aa13f2 | 1e76ac27a47422bc996831946974a3c107202ae8 | refs/heads/master | 2020-04-01T09:00:03.503760 | 2018-10-25T03:55:18 | 2018-10-25T03:55:18 | 153,055,658 | 0 | 0 | null | false | 2018-10-16T05:54:12 | 2018-10-15T05:03:15 | 2018-10-15T17:28:33 | 2018-10-16T05:54:11 | 20 | 0 | 0 | 0 | Python | false | null | def sum_nums(n1, n2):
print(n1 + n2)
sum_nums(2, 8)
l = [1, 2, 3, 2, 2]
print(l.count(2))
print(l.append(6))
print(l)
print(len(l)) | UTF-8 | Python | false | false | 135 | py | 50 | test37understandingmethods.py | 47 | 0.577778 | 0.481481 | 0 | 8 | 16 | 21 |
Dachoka3000/password-locker | 2,611,340,151,366 | 01ac8183a595b9e117d0b3b630204afcd67152e1 | 3221080135f2740a29738144b52e18d026347c5f | /credentials_test.py | 971e355cfcd96c1a7f2177bf03ef5d8a93d12740 | [
"MIT"
]
| permissive | https://github.com/Dachoka3000/password-locker | 82aeffcebf9198a492d2107cffaff70b1d47474a | 179f0c29c03b0a20f09a477f1b4ef86ab7d18c27 | refs/heads/master | 2022-12-22T15:47:21.084424 | 2020-09-16T15:29:00 | 2020-09-16T15:29:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import pyperclip
from credentials import Credentials
class TestCredentials(unittest.TestCase):
'''
Test class that defines test cases for the credentials class behaviours
Args:
unittest.TestCase: TestCase class that helps in creating test cases
'''
def tearDown(self):
'''
        tearDown method that cleans up after each test case is run
'''
Credentials.credentials_list = []
def setUp(self):
'''
Setup method to run before each test cases
'''
self.new_credentials = Credentials("Twitter", "MaryX", "maryx1000")
def test_init(self):
'''
test_init test case to test if the object is initialised correctly
'''
self.assertEqual(self.new_credentials.account, "Twitter")
self.assertEqual(self.new_credentials.user_name, "MaryX")
self.assertEqual(self.new_credentials.pass_word,"maryx1000")
def test_save_credentials(self):
'''
test_save_credentials to test if the credentials objects are saved into the credentials list
'''
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list), 1)
def test_save_multiple_credentials(self):
'''
        test_save_multiple_credentials to check if we can save multiple credentials
'''
self.new_credentials.save_credentials()
test_credential = Credentials("Instagram", "Muse Art", "museart1000")
test_credential.save_credentials()
self.assertEqual(len(Credentials.credentials_list), 2)
def test_delete_credentials(self):
'''
test to check if we can delete credentials from the credentials list
'''
self.new_credentials.save_credentials()
test_credential = Credentials("Instagram", "Muse Art", "museart1000")
test_credential.save_credentials()
self.new_credentials.delete_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
def test_find_credentials_by_account(self):
'''
test to check if we can find credentials by account and display information
'''
self.new_credentials.save_credentials()
test_credential = Credentials("Snapchat", "Test User", "user195")
test_credential.save_credentials()
found_credential = Credentials.find_by_account("Snapchat")
self.assertEqual(found_credential.pass_word, test_credential.pass_word)
def test_credentials_exists(self):
'''
test to check if we can return a boolean if we cannot find the credentials
'''
self.new_credentials.save_credentials()
test_credential = Credentials("Snapchat", "Test User", "user195")
test_credential.save_credentials()
credential_exists = Credentials.credentials_exist("Snapchat")
self.assertTrue(credential_exists)
def test_display_all_credentials(self):
'''
method that returns a list of all credentials saved
'''
self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)
def test_copy_details(self):
'''
test to confirm that we are copying the details from a found account
'''
self.new_credentials.save_credentials()
Credentials.copy_details("Snapchat")
self.assertEqual(self.new_credentials.user_name, pyperclip.paste())
if __name__ == '__main__':
unittest.main()
| UTF-8 | Python | false | false | 3,515 | py | 6 | credentials_test.py | 5 | 0.652916 | 0.645804 | 0 | 109 | 31.229358 | 100 |
shikouchen/WalkingTKK_crawler | 7,198,365,199,514 | cac43cce1e43cff4b8d3f2127b0ad9e321729e3a | 43c0be39103305ad8ad6b8fee78b3d515a13f8de | /jw_update_info/check_ele_info.py | f39ee281f35db83d8b2ef30a8c8fb84132d8bc6f | []
| no_license | https://github.com/shikouchen/WalkingTKK_crawler | 96b66b22a09801e769684fcdf51c365efdac1e64 | 6e3567c74901575d446dad0c4fe50368fb567796 | refs/heads/master | 2020-05-31T16:59:12.824500 | 2019-06-05T13:09:23 | 2019-06-05T13:09:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author: Erick
from jw_info_update_method import *
from log import logger
import os,sys
import urllib
import xml.dom.minidom
import time
import datetime
import json
import urllib.request
import requests
class CheckEleInfo():
def __init__(self):
pass
def ReadSet(self):
host = ""
user = ""
passwd = ""
db = ""
charset = ""
if os.path.exists("settings.conf"):
dom = xml.dom.minidom.parse('settings.conf')
data = dom.documentElement
try:
host = data.getElementsByTagName("host")[0].childNodes[0].data
user = data.getElementsByTagName("user")[0].childNodes[0].data
passwd = data.getElementsByTagName("passwd")[0].childNodes[0].data
db = data.getElementsByTagName("db")[0].childNodes[0].data
charset = data.getElementsByTagName("charset")[0].childNodes[0].data
except:
print("The configuration information is incomplete")
sys.exit(0)
return host,user,passwd,db,charset
def check_ele(self):
host = "120.78.63.47"
user = "api"
passwd = "forsource,110"
db = "tkk_app"
charset = 'utf8'
print("更新电费信息--开始--" + str(datetime.datetime.now()))
# host, user, passwd, db, charset = self.ReadSet()
print(host)
jw_update = JW(host, user, passwd, db, charset)
url = 'https://www.xujc.club/api/userallinfofromtkkdatabase'
headers = {
"Authorization": "Basic YWRtaW46YWRtaW4xMjM0NQ=="
}
request = urllib.request.Request(url, headers=headers)
rawData = urllib.request.urlopen(request)
        jsonStr = rawData.read()  # read the JSON data out with the read method
        users = json.loads(jsonStr.decode('utf8'))  # convert the JSON data into a Python list
# print(users)
for user in users:
tel = user["tel"]
drRoomId = user["drRoomId"]
drlou = user["drlou"]
drxiaoqu = user["drxiaoqu"]
send_message_status = user["send_message_status"]
if drxiaoqu and drlou and drRoomId:
try:
ele_url = "https://www.xujc.club/api/electricchargeinfo/" + tel
ele_resp = requests.get(url=ele_url, headers=headers)
ele_raw_data = ele_resp.text
                    ele_raw_data = json.loads(ele_raw_data)  # convert the JSON data into a Python list
result = ele_raw_data[0]["result"]
if result != "1":
print(tel + "---userinfo表中的宿舍信息:" + drxiaoqu + drlou + drRoomId)
try:
jw_update.get_ele(tel, drxiaoqu, drlou, drRoomId, send_message_status)
except Exception as e:
print("crawl info--更新电费信息失败--" + str(datetime.datetime.now()) + "--" + tel + "---" + str(e))
except Exception as e:
print("获取电费信息失败" + tel + "---" + str(e))
print("更新电费信息--结束")
if __name__ == '__main__':
check = CheckEleInfo()
check.check_ele()
| UTF-8 | Python | false | false | 3,351 | py | 30 | check_ele_info.py | 22 | 0.539732 | 0.529448 | 0 | 85 | 36.705882 | 120 |
lukewbecker/classification-exercises | 9,835,475,125,690 | 0ed5c1582374c7513bcd4e8716e2848ded2072e8 | 9f0a648ace0555f5a7af3a7a59a4b34c74f8238e | /aquire.py | 651f4048638f9d333ac30e24e88a3826e43531eb | []
| no_license | https://github.com/lukewbecker/classification-exercises | 10afb485a517d814df108abc718d8505f9c19d42 | fa21ad6e2466896d2a945ed8b2fe522b35487183 | refs/heads/master | 2022-12-12T05:59:21.089591 | 2020-09-24T13:24:26 | 2020-09-24T13:24:26 | 293,915,453 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Creating the aquire.py file
# Make a new python module, acquire.py to hold the following data acquisition functions:
# get_titanic_data
# get_iris_data
# Importing libraries:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Importing the os library specifically for reading the csv once I've created the file in my working directory.
import os
# Make a function named get_titanic_data that returns the titanic data from the codeup data science database as a pandas data frame. Obtain your data from the Codeup Data Science Database.
# Setting up the user credentials:
from env import host, user, password
def get_db(db, user=user, host=host, password=password):
return f'mysql+pymysql://{user}:{password}@{host}/{db}'
# titanic_db query
sql_query = 'SELECT * FROM passengers'
def get_titanic_data():
return pd.read_sql('SELECT * FROM passengers', get_db('titanic_db'))
# Showing the df:
# print(get_titanic_data())
# Make a function named get_iris_data that returns the data from the iris_db on the codeup data science database as a pandas data frame. The returned data frame should include the actual name of the species in addition to the species_ids. Obtain your data from the Codeup Data Science Database.
def get_iris_data():
return pd.read_sql('''SELECT measurement_id, sepal_length, sepal_width, petal_length, petal_width, m.species_id, species_name
FROM measurements AS m
JOIN species AS s on m.species_id = s.species_id;''', get_db('iris_db'))
# Once you've got your get_titanic_data and get_iris_data functions written, now it's time to add caching to them. To do this, edit the beginning of the function to check for a local filename like titanic.csv or iris.csv. If they exist, use the .csv file. If the file doesn't exist, then produce the SQL and pandas necessary to create a dataframe, then write the dataframe to a .csv file with the appropriate name.
def get_titanic_data_cache():
filename = 'titanic.csv'
if os.path.isfile(filename):
return pd.read_csv(filename)
else:
titanic_df = pd.read_sql('SELECT * FROM passengers', get_db('titanic_db'))
titanic_df.to_csv(filename, index = False)
# Ryan was using .to_file, but I was getting error when trying to use that function.
return titanic_df
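# The caching pattern above: hit the database once, persist the result to CSV,
# and serve the CSV on every later call, e.g.
#   df = get_titanic_data_cache()   # first call queries MySQL and writes titanic.csv
#   df = get_titanic_data_cache()   # later calls just read titanic.csv back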
# print(get_titanic_data_cache())
# Defining the variable for my titanic data:
# df_titanic = get_titanic_data_cache()
# # Creating a seaborn chart:
# sns.relplot(data = df_titanic, x = 'age', y = 'fare', hue = 'class')
# plt.show()
# Doing the same for the iris_db:
def get_iris_data_cache():
filename = 'iris.csv'
if os.path.isfile(filename):
return pd.read_csv(filename)
else:
iris_df = get_iris_data()
iris_df.to_csv(filename, index = False)
# Ryan was using .to_file, but I was getting error when trying to use that function.
return iris_df
# print(type(get_iris_data_cache()))
# print(get_iris_data_cache())
print('End of file.') | UTF-8 | Python | false | false | 3,041 | py | 10 | aquire.py | 4 | 0.713252 | 0.713252 | 0 | 82 | 36.097561 | 414 |
HelenProkopova/structural-programming | 14,010,183,349,418 | 8cf36a42a6bb33e9aea28671176b2c3c0fe32fc8 | 560549f12e4139d31225d806406d489ec9b36f0c | /4.1.py | 3dc2f2229adaf0a7a42e1d55fd05ef6816b79f81 | []
| no_license | https://github.com/HelenProkopova/structural-programming | ee937f5072efb80594d53a5c1131e0de88800be5 | 56f9885dbf34024a4da2d038110a50c002d8f279 | refs/heads/master | 2021-01-19T14:40:45.800619 | 2017-05-24T18:43:48 | 2017-05-24T18:43:48 | 88,181,810 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | s=input ('введите строку: ')
t=int(0)
for i in range(len(s)):
if s[i]== ',' or s[i]== '.' :
t=t+1
print ('количество точек и запятых= ',t)
| UTF-8 | Python | false | false | 212 | py | 25 | 4.1.py | 24 | 0.448864 | 0.4375 | 0 | 7 | 20.714286 | 40 |
katarzynalatos/Lab_1 | 7,533,372,642,781 | 5b27a787979b1b8249feed8d02b9e52c06aff541 | 9d1a857b529e2c1fe9ae4174b5d859e094299a86 | /problem2/InputFileValidator.py | 1c9be451969cac835cc9f3af4d210facb03f399e | []
| no_license | https://github.com/katarzynalatos/Lab_1 | 900c40e2513be858cceda8a7b9b20325fe30b029 | 710bcd135775783337c5728c88d8283e800419b9 | refs/heads/master | 2020-05-25T23:59:03.156302 | 2017-04-05T06:53:38 | 2017-04-05T06:53:38 | 84,980,182 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from os.path import isfile
from os.path import isdir
from os import listdir
from AbstractValidator import AbstractValidator
class InputFileValidator(AbstractValidator):
def __init__(self, name):
self._name=name
self._files = []
def validate(self):
if isfile(self._name):
self._files.append(self._name)
return True
elif isdir(self._name):
self._files = [self._name+"/"+files for files in listdir(self._name) if isfile(self._name+"/"+files)]
if len(self._files) > 0:
return True
else:
return False
else:
return False
def get_list_of_files(self):
return self._files
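# Example usage (illustrative sketch; AbstractValidator is assumed to declare
# the validate() interface): validate() returns True when the given name is a
# file, or a directory containing at least one file, and get_list_of_files()
# then returns the matched paths.
#
#     validator = InputFileValidator("problem2")
#     if validator.validate():
#         print(validator.get_list_of_files())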
| UTF-8 | Python | false | false | 757 | py | 7 | InputFileValidator.py | 5 | 0.557464 | 0.556143 | 0 | 26 | 27.115385 | 113 |
Aasthaengg/IBMdataset | 3,848,290,739,084 | d6999daea8ff69ecd418869fce153499d0e9368d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02747/s241428742.py | c8aff520471c477e8a417c13c4623a03de673779 | []
| no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | s=input()
if len(s)%2==0:
for i in range(len(s)//2):
if s[2*i:2*i+2]=="hi":
continue
print("No")
break
else:
print("Yes")
else:
print("No") | UTF-8 | Python | false | false | 167 | py | 202,060 | s241428742.py | 202,055 | 0.497006 | 0.461078 | 0 | 11 | 14.272727 | 28 |
ericw13/radabo | 2,138,893,759,898 | 925b67370ff7768675d691992373fdb5a4061780 | 463aaa1a47c50f93e19e7b6b7d4d00bbdaec7769 | /app/radabo/urls.py | 7a9971749ef4ecc0ef9890ec71139dada65db41b | []
| no_license | https://github.com/ericw13/radabo | 83bee8d1c1dadac05999876a69d8a650f97dcfce | ed6b9ba4b57d8fccc59686ea66c98ed23e9646f0 | refs/heads/master | 2021-01-12T06:55:25.197106 | 2016-12-19T15:09:50 | 2016-12-19T15:09:50 | 76,681,084 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from radabo import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^prioritization/?$', views.Priority, name='priority'),
url(r'^dashboard/?$', views.Dashboard, name='dashboard'),
url(r'^enhancements/release/?$', views.ReleaseReport, name='release'),
url(r'^enhancements/release/chart/?$', views.enhGraph, name='releasechart'),
url(r'^enhancements/sprint/?$', views.SprintReport, name='sprint'),
url(r'^enhancements/pending/?$', views.PendingUAT, name='UAT'),
url(r'^enhancements/backlog/?$', views.Backlog, name='backlog'),
url(r'^enhancements/backlog/(?P<chartType>[a-zA-Z]+)/?$', views.BacklogGraphs, name='blgraphs'),
url(r'^enhancements/bymodule/?$', views.enhByModule, name='allbymod'),
url(r'^enhancements/other/?$', views.NonFinance, name='nonfin'),
url(r'^backlog/(?P<chartType>[a-zA-Z]+)/?$', views.BacklogGraphs, name='oldblgraphs'),
url(r'^projects/grooming/?$', views.ProjectGrooming, name='projectGrooming'),
url(r'^projects/active/(?P<epic>E[0-9]+)/?$', views.ProjectStories, name='projectStories'),
url(r'^projects/active/?$', views.EpicView, name='activeProjects'),
url(r'^velocity/old/?$', views.OldVelocityChart, name='oldvelocity'),
url(r'^velocity/?$', views.VelocityChart, name='velocity'),
url(r'^sprint/?$', views.FullSprint, name='full_sprint'),
url(r'^syncstory/?$', views.updateStory, name='updateStory'),
url(r'^info/?$', views.Info, name='info'),
]
| UTF-8 | Python | false | false | 1,525 | py | 49 | urls.py | 21 | 0.668197 | 0.666885 | 0 | 25 | 60 | 100 |
msh5/pipfile-sort | 7,816,840,513,810 | 9d995078121f8703b8757e4e8ee42b56a1074b46 | fcc5337a191d4214655f8c34c84674270c153aa5 | /pipfile_sort/__init__.py | d660901d32413abfb6582396a808173febfa3446 | [
"MIT"
]
| permissive | https://github.com/msh5/pipfile-sort | 6dd6bab03a5e69f80bf241e0ed4792f418fe3272 | c196cf282c20e2aaefdce8ea8959faf638311ef6 | refs/heads/master | 2023-02-18T07:41:47.518111 | 2021-09-02T13:07:07 | 2021-09-02T13:07:07 | 223,908,588 | 10 | 2 | MIT | false | 2023-02-08T01:50:09 | 2019-11-25T09:20:43 | 2022-11-15T13:13:27 | 2023-02-08T01:50:09 | 44 | 10 | 2 | 5 | Python | false | false | from click import command
from click import option
from click import version_option
from plette import Pipfile
from plette.pipfiles import PackageCollection
import sys
APP_VERSION = '0.2.2'
PIPFILE_FILENAME = './Pipfile'
PIPFILE_ENCODING = 'utf-8'
@command()
@version_option(version=APP_VERSION)
@option('--exit-code', is_flag=True, help=
        'change the exit-code behavior: by default the command exits with 0 when there are '
        'no differences and 1 on error; with this option it exits with 2 when differences exist.')
def main(exit_code):
# Load current data.
with open(PIPFILE_FILENAME, encoding=PIPFILE_ENCODING) as f:
pipfile = Pipfile.load(f)
# Sort "dev-packages" mapping.
sorted_dev_packages, all_changed = __sort_collection(pipfile.dev_packages)
# Sort "packages" mapping.
sorted_packages, changed = __sort_collection(pipfile.packages)
if changed:
all_changed = True
# Replace with sorted lists
pipfile.dev_packages = sorted_dev_packages
pipfile.packages = sorted_packages
# Store sorted data.
with open(PIPFILE_FILENAME, 'w', encoding=PIPFILE_ENCODING) as f:
Pipfile.dump(pipfile, f)
    # When the --exit-code option is set and the package collection changed, exit with 2.
if exit_code and all_changed:
sys.exit(2)
def __sort_collection(org_collection):
org_packages = [p for p in org_collection]
sorted_packages = sorted(org_packages)
return (
PackageCollection({
p: org_collection[p]._data for p in sorted_packages
}),
org_packages != sorted_packages,
)
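# For example: if the Pipfile lists its packages in the order requests,
# plette, click, __sort_collection returns them reordered as click, plette,
# requests together with True, since the original ordering differed.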
| UTF-8 | Python | false | false | 1,612 | py | 7 | __init__.py | 2 | 0.687345 | 0.681141 | 0 | 53 | 29.396226 | 111 |
vert-rouge/sample-zodb-migration-2to3 | 14,834,817,076,671 | 59339f1ab85776fd6bac63f3449b26ba9c844192 | 302c9d74cd6b5dd910d0d810d8f646e054d6662c | /readDbfrompython3.py | f85beb975702c42e212a1b27ff96932a7f740f2d | []
| no_license | https://github.com/vert-rouge/sample-zodb-migration-2to3 | ff7f45cbdbcbac4d1f180a3abbf0424473c21d61 | 0295f7e9a5349135b18aad7dfd7b72a9a10237e1 | refs/heads/master | 2021-09-10T01:21:24.714389 | 2018-03-20T14:17:18 | 2018-03-20T14:17:18 | 126,027,959 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import ZODB, ZODB.FileStorage
storage = ZODB.FileStorage.FileStorage('Data_python2.fs')
db = ZODB.DB(storage)
with db.transaction() as connection:
print("la transaction est ouverte!")
print(connection.root.truc)
print(dir(connection.root.truc))
for k,v in connection.root.truc.items():
print(k)
print(v)
| UTF-8 | Python | false | false | 364 | py | 3 | readDbfrompython3.py | 3 | 0.67033 | 0.664835 | 0 | 15 | 23.266667 | 57 |
stevenman42/DefenderDotPyToo | 9,680,856,298,397 | 05522959a044a6a662c665a5be72edb1642009c7 | eb7e62574e80a3561d4917aef98d2e4cadd88dd7 | /main.py | fe859ed44ee8094d2ca4fb07fb7a133e5471bb1c | []
| no_license | https://github.com/stevenman42/DefenderDotPyToo | 6fbff160787c614c3f99350e9d24a8970608ccd4 | e11bf1e1fba3509837b55f8ac74b738bc23760df | refs/heads/master | 2020-05-07T17:42:24.301865 | 2014-07-10T02:01:46 | 2014-07-10T02:01:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame, sys
from classes import *
from math import *
pygame.init()
pygame.mixer.init()
pygame.font.init()
#test
WIDTH = 960
HEIGHT = 640
BLUE = ( 50, 80,220)
RED = (230, 70, 90)
SKY_BLUE = (191,244,255)
screen = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption("Defender")
clock = pygame.time.Clock()
# pygame.event.set_grab(1)
pygame.mouse.set_visible(0)
EverythingList = [] # not necessarily everything, just everything that needs to be rendered!
projectileList = []
CloudList = []
MortarList = []
TankList = []
EnemyList = []
HeartList = []
player = Player((960/2),(640-32),16,32)
EverythingList.append(player)
shootCount = 0
gravity = 2
totalFrames = 0
canShoot = True
Score = 0
Lives = 10
Money = 0
MortarIcon = pygame.image.load("images/mortar_icon.png")
LaserIcon = pygame.image.load("images/laser_icon.png")
ground = pygame.image.load("images/ground.png")
tank = pygame.image.load("images/tank.png")
helicopter = pygame.image.load("images/helicopter.png")
laser = pygame.mixer.Sound("sounds/shoot.wav")
mortarSound = pygame.mixer.Sound("sounds/mortar.wav")
MortarDrop = pygame.mixer.Sound("sounds/mortardrop.wav")
HeliHit = pygame.mixer.Sound("sounds/helihit.wav")
font = pygame.font.Font(None, 36)
ActiveWeapon = 'laser'
ActiveWeaponIcon = LaserIcon
def GetMousePos():
mPos = pygame.mouse.get_pos()
return mPos
def GetMouseState():
clict = pygame.mouse.get_pressed()[0]
return clict
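# Shoot() spawns a projectile at the player aimed at the cursor: each velocity
# component is proportional to the mouse-to-player offset on that axis,
# divided by a slightly randomized factor so repeated shots spread a little.
# Mortars cost 5 money and arc under gravity; the triple laser fires three
# projectiles with different speed divisors.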
def Shoot():
global Money
if ActiveWeapon == 'laser':
laser.play(loops = 0)
projectile = Projectile(player.xPos, player.yPos, 8, 8)
if projectile.xPos < mPos[0]:
projectile.xVel = (mPos[0]-player.xPos)/(uniform(10.5,11.5))
elif projectile.xPos > mPos[0]:
projectile.xVel = (mPos[0]-player.xPos)/(uniform(10.5,11.5))
if projectile.yPos < mPos[1]:
projectile.yVel = ((HEIGHT-(HEIGHT-player.yPos)) - mPos[1])/(uniform(10.5,11.5))
elif projectile.yPos > mPos[1]:
projectile.yVel = -((HEIGHT-(HEIGHT-player.yPos)) - mPos[1])/(uniform(10.5,11.5))
projectileList.append(projectile)
EverythingList.append(projectile)
y = HEIGHT-(HEIGHT-player.yPos)-mPos[1]
x = mPos[0]-player.xPos
elif ActiveWeapon == 'mortar':
if Money >= 5:
mortarSound.play(loops = 0)
mortProject = Projectile(player.xPos, player.yPos, 16, 16)
if mortProject.xPos < mPos[0]:
mortProject.xVel = (mPos[0]-player.xPos)/(uniform(9.5,10.5))
elif mortProject.xPos > mPos[0]:
mortProject.xVel = (mPos[0]-player.xPos)/(uniform(9.5,10.5))
if mortProject.yPos < mPos[1]:
mortProject.yVel = ((HEIGHT-(HEIGHT-player.yPos)) - mPos[1])/(uniform(9.5,10.5))
elif mortProject.yPos > mPos[1]:
mortProject.yVel = -((HEIGHT-(HEIGHT-player.yPos)) - mPos[1])/(uniform(9.5,10.5))
MortarList.append(mortProject)
EverythingList.append(mortProject)
Money -= 5
elif ActiveWeapon == 'triple laser':
laser.play(loops = 0)
for i in [10,12,14]:
projectile = Projectile(player.xPos, player.yPos, 8, 8)
if projectile.xPos < mPos[0]:
projectile.xVel = (mPos[0]-player.xPos)/i
elif projectile.xPos > mPos[0]:
projectile.xVel = (mPos[0]-player.xPos)/i
if projectile.yPos < mPos[1]:
projectile.yVel = ((HEIGHT-(HEIGHT-player.yPos)) - mPos[1])/i
elif projectile.yPos > mPos[1]:
projectile.yVel = -((HEIGHT-(HEIGHT-player.yPos)) - mPos[1])/i
projectileList.append(projectile)
EverythingList.append(projectile)
y = HEIGHT-(HEIGHT-player.yPos)-mPos[1]
x = mPos[0]-player.xPos
while True:
# Keyboard Input #
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
pass # Maybe player movement sometime in the future, but probably not
elif event.key == pygame.K_RIGHT:
pass
elif event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
elif event.key == pygame.K_1:
ActiveWeapon = 'laser'
ActiveWeaponIcon = LaserIcon
elif event.key == pygame.K_2:
ActiveWeapon = 'mortar'
ActiveWeaponIcon = MortarIcon
elif event.key == pygame.K_3:
ActiveWeapon = 'triple laser'
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
pass
# End Keyboard Input #
clict = GetMouseState()
mPos = GetMousePos()
# the stuff that happens when you click
if clict == 1: # If the user left-clicks:
mPos = GetMousePos() # Retrieve the position of the cursor for processing
if canShoot == True:
Shoot()
canShoot = False
else:
pass
if clict == 0:
canShoot = True
screen.blit(ground, (0, 0))
# Text (Score, Lives, Money!) #
ScoreText = font.render(str(Score), 1, (10, 10, 10))
screen.blit(ScoreText, (10,10))
LivesText = font.render(str(Lives), 1, (255,10,10))
screen.blit(LivesText, (10, 40))
MoneyText = font.render(str(Money), 1, (80,205,50))
screen.blit(MoneyText, (920, 10))
# Weapon Icons #
screen.blit(ActiveWeaponIcon, (10, 70))
for cloud in CloudList:
if cloud.xPos >= WIDTH + 128:
CloudList.remove(cloud)
EverythingList.remove(cloud)
for projectile in projectileList:
if projectile.yPos < 0 or projectile.xPos < 0 or projectile.xPos > WIDTH:
projectileList.remove(projectile)
EverythingList.remove(projectile)
print(len(projectileList))
for enemy in EnemyList:
if enemy.xPos > -32 and enemy.xPos < WIDTH:
enemy.rect = pygame.Rect(enemy.xPos,enemy.yPos,enemy.width,enemy.height)
projectile.rect = pygame.Rect(projectile.xPos,projectile.yPos,projectile.width,projectile.height)
if projectile.rect.colliderect(enemy.rect) or enemy.rect.contains(projectile.rect):
if enemy in EnemyList:
enemy.yVel = 18
HeliHit.play(loops = 0)
if randint(1,15) == 1:
NewHeart = Heart(enemy.xPos, enemy.yPos, 16,16)
HeartList.append(NewHeart)
EverythingList.append(NewHeart)
if projectile in projectileList:
projectileList.remove(projectile)
EverythingList.remove(projectile)
Score += 1
Money += 5
if enemy.xPos >= WIDTH + 64:
EnemyList.remove(enemy)
EverythingList.remove(enemy)
if enemy.yPos >= HEIGHT:
EnemyList.remove(enemy)
EverythingList.remove(enemy)
else:
pass
for heart in HeartList:
heart.rect = pygame.Rect(heart.xPos,heart.yPos,heart.width,heart.height)
projectile.rect = pygame.Rect(projectile.xPos,projectile.yPos,projectile.width,projectile.height)
if projectile.rect.colliderect(heart.rect):
if heart in HeartList:
HeartList.remove(heart)
EverythingList.remove(heart)
Lives += 1
for mortar in MortarList:
mortar.yVel += gravity
if mortar.yPos > HEIGHT:
MortarList.remove(mortar)
EverythingList.remove(mortar)
MortarDrop.play(loops = 0)
for tank in TankList:
tank.rect = pygame.Rect(tank.xPos,tank.yPos,tank.width,tank.height)
mortar.rect = pygame.Rect(mortar.xPos,mortar.yPos,mortar.width,mortar.height)
if mortar.rect.colliderect(tank.rect) or tank.rect.contains(mortar.rect):
if tank in TankList:
TankList.remove(tank)
EverythingList.remove(tank)
MortarList.remove(mortar)
EverythingList.remove(mortar)
Score += 2
Money += 10
for enemy in EnemyList:
if enemy.xPos > WIDTH:
EnemyList.remove(enemy)
EverythingList.remove(enemy)
Lives -= 1
for heart in HeartList:
if heart.yPos > HEIGHT:
HeartList.remove(heart)
EverythingList.remove(heart)
if totalFrames % 60 == 0:
NCX = randint(-100, -50)
NCY = randint(64, 128)
NewCloud = Cloud(NCX, NCY, 128, 64)
CloudList.append(NewCloud)
EverythingList.append(NewCloud)
if randint(1,2) == 1:
NEX = randint(-100,0)
NEY = randint(64,300)
NewEnemy = Enemy(NEX, NEY, 32, 16,'helicopter')
EnemyList.append(NewEnemy)
EverythingList.append(NewEnemy)
if randint(1,2) == 1:
NEX = randint(-100,0)
NEY = HEIGHT-16
NewEnemy = Enemy(NEX, NEY, 32, 16, 'tank')
TankList.append(NewEnemy)
EverythingList.append(NewEnemy)
for tank in TankList:
if tank.xPos > WIDTH/2:
if tank in TankList:
TankList.remove(tank)
EverythingList.remove(tank)
Lives -= 1
for everything in EverythingList:
everything.render(screen)
everything.xPos += everything.xVel
everything.yPos += everything.yVel
pygame.draw.rect(screen, (0,0,0), (mPos[0] - 16, mPos[1] - 1, 32, 2))
pygame.draw.rect(screen, (0,0,0), (mPos[0] - 1, mPos[1] - 16, 2, 32))
pygame.draw.circle(screen, (200, 60, 70), (mPos[0], mPos[1]), 2)
pygame.draw.circle(screen, (0,0,0), (mPos[0], mPos[1]),12,2)
clock.tick(60)
shootCount += 1
totalFrames += 1
pygame.display.flip() | UTF-8 | Python | false | false | 8,608 | py | 4 | main.py | 3 | 0.682156 | 0.645795 | 0 | 340 | 24.320588 | 101 |
m-wrzr/code30 | 4,114,578,706,809 | 1152e8d9b88912fd1a70a0387411ab6311c98dff | 1089f139ea13e422f8c52668866414cc1b18085d | /solutions/21/solution.py | 2e97783123553a5579e1f6dd5341ad0761d2ee27 | []
| no_license | https://github.com/m-wrzr/code30 | d297d12888cf0256cf64daca594086f4cadb8938 | 2f28371685364b4b64e3253bd3a07d211f81ceee | refs/heads/master | 2020-12-24T19:37:05.939264 | 2016-05-12T15:56:08 | 2016-05-12T15:56:08 | 56,145,017 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | inputSize = "large"
with open("A-" + inputSize + "-practice.in", "r") as file:
output = ""
nCase = int(file.readline())
for case in range(0, nCase):
inputs = file.readline().split()
standing = 0
friendsNeeded = 0
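        # A person with shyness level i stands only once at least i people
        # are already standing; track the running count of standing people
        # and record the largest shortfall, which is how many always-standing
        # friends must be invited.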
for i, amount in enumerate([int(s) for s in list(inputs[1])]):
friendsNeeded = max(friendsNeeded, i - standing)
standing += amount
output += "Case #{}: {}\n".format(case + 1, friendsNeeded)
print(output)
# write to txt
with open("A-" + inputSize + "-output.txt", "w") as text_file:
text_file.write(output)
| UTF-8 | Python | false | false | 610 | py | 37 | solution.py | 18 | 0.570492 | 0.562295 | 0 | 24 | 24.291667 | 70 |
Zilby/Stuy-Stuff | 10,307,921,511,886 | db17878d614dc60e52483374b4d9f9052fec6ed5 | 2b25aae9266437b657e748f3d6fea4db9e9d7f15 | /graphics/polygons/8/adam_dehovitz/draw.py | 90b55664497a1feccbc227a858e47511f42846e6 | []
| no_license | https://github.com/Zilby/Stuy-Stuff | b1c3bc23abf40092a8a7a80e406e7c412bd22ae0 | 5c5e375304952f62667d3b34b36f0056c1a8e753 | refs/heads/master | 2020-05-18T03:03:48.210196 | 2018-11-15T04:50:03 | 2018-11-15T04:50:03 | 24,191,397 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from display import *
from matrix import *
import math
def add_rect( points, x0, y0, z0, h, w, d):
add_edge(points, x0, y0, z0, x0, y0, z0)
add_edge(points, x0, y0 - h, z0, x0, y0 - h, z0)
add_edge(points, x0, y0, z0 - d, x0, y0, z0 - d)
add_edge(points, x0, y0 - h, z0 - d, x0, y0 - h, z0 - d)
add_edge(points, x0 + w, y0 - h, z0, x0 + w, y0 - h, z0)
add_edge(points, x0 + w, y0 - h, z0 - d,x0 + w, y0 - h, z0 - d)
add_edge(points, x0 + w, y0, z0, x0 + w, y0, z0)
add_edge(points, x0 + w, y0, z0 - d, x0 + w, y0, z0 - d)
def add_sphere( points, cx, cy, r):
step = 0.1
t1 = 0
while (t1 <= 1.00000001):
t2 = 0
while (t2 <= 1.00000001):
theta1 = math.pi * t1
theta2 = 2 * math.pi * t2
x = cx + r * math.cos(theta1)
y = cy + r * math.sin(theta1) * math.cos(theta2)
z = r * math.sin(theta1) * math.sin(theta2)
add_point( points, x, y, z )
t2 += 0.01
t1 += 0.01
def add_torus(points, cx, cy, r1, r2):
t1 = 0
while (t1 <= 1.001):
t2 = 0
while (t2 <= 1.001):
theta1 = 2 * math.pi * t1
theta2 = 2 * math.pi * t2
x = cx + math.cos(theta1) * (r1 * math.cos(theta2) + r2)
y = cy + r1 * math.sin(theta2)
z = math.sin(theta1) * (r1 * math.cos(theta2) + r2)
add_point(points, x, y, z)
t2 += 0.01
t1 += 0.01
def add_circle( points, cx, cy, cz, r, step ):
x0 = cx + r
y0 = cy
z0 = cz
t=0
while ( t<=1.0001) :
x = r * math.cos( 2 * math.pi * t ) + cx
y = r * math.sin( 2 * math.pi * t ) + cy
z = cz
#print( str(x) + ", " + str(y) )
add_edge (points, x0, y0, z0, x, y, z)
x0 = x
y0 = y
z0 = z
t= t+ step
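# add_curve traces a parametric cubic: generate_curve_coefs (imported via
# matrix) turns the four control values into coefficients, and the loop
# evaluates x(t) = a*t^3 + b*t^2 + c*t + d (likewise for y) for t in [0, 1],
# joining consecutive samples with edges.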
def add_curve( points, x0, y0, x1, y1, x2, y2, x3, y3, step, curve_type ):
t = 0
xco = generate_curve_coefs(x0, x1, x2, x3, curve_type)
yco = generate_curve_coefs(y0, y1, y2, y3, curve_type)
while (t <= 1.0001):
x = xco[0]*math.pow(t,3) + xco[1]*math.pow(t,2) + xco[2]*(t)+xco[3]
y = yco[0]*math.pow(t,3) + yco[1]*math.pow(t,2) + yco[2]*(t)+yco[3]
add_edge( points, x0, y0, 0, x, y, 0)
x0 = x
y0 = y
t += step
def draw_lines( matrix, screen, color ):
if len( matrix ) < 2:
print "Need at least 2 points to draw a line"
p = 0
while p < len( matrix ) - 1:
draw_line( screen, matrix[p][0], matrix[p][1],
matrix[p+1][0], matrix[p+1][1], color )
p+= 2
def add_edge( matrix, x0, y0, z0, x1, y1, z1 ):
add_point( matrix, x0, y0, z0 )
add_point( matrix, x1, y1, z1 )
def add_point( matrix, x, y, z=0 ):
matrix.append( [x, y, z, 1] )
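# draw_line is a case-split Bresenham-style rasterizer: after orienting the
# endpoints, each branch steps along the dominant axis and uses the running
# error term d to decide when to advance the other axis.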
def draw_line( screen, x0, y0, x1, y1, color ):
dx = x1 - x0
dy = y1 - y0
if dx + dy < 0:
dx = 0 - dx
dy = 0 - dy
tmp = x0
x0 = x1
x1 = tmp
tmp = y0
y0 = y1
y1 = tmp
if dx == 0:
y = y0
while y <= y1:
plot(screen, color, x0, y)
y = y + 1
elif dy == 0:
x = x0
while x <= x1:
plot(screen, color, x, y0)
x = x + 1
elif dy < 0:
d = 0
x = x0
y = y0
while x <= x1:
plot(screen, color, x, y)
if d > 0:
y = y - 1
d = d - dx
x = x + 1
d = d - dy
elif dx < 0:
d = 0
x = x0
y = y0
while y <= y1:
plot(screen, color, x, y)
if d > 0:
x = x - 1
d = d - dy
y = y + 1
d = d - dx
elif dx > dy:
d = 0
x = x0
y = y0
while x <= x1:
plot(screen, color, x, y)
if d > 0:
y = y + 1
d = d - dx
x = x + 1
d = d + dy
else:
d = 0
x = x0
y = y0
while y <= y1:
plot(screen, color, x, y)
if d > 0:
x = x + 1
d = d - dy
y = y + 1
d = d + dx
| UTF-8 | Python | false | false | 4,306 | py | 742 | draw.py | 601 | 0.397817 | 0.33372 | 0 | 158 | 26.246835 | 75 |
HectorTR/ica | 7,765,300,884,587 | e86f03f7d907a6f68eb4d3818b8b81ff141a3ab4 | 37f0da68f87247c814fa6ad039ceee8be2167ffd | /my_test.py | d51f2e34d78d7b4119215e1fbe4356b9ce31deaf | []
| no_license | https://github.com/HectorTR/ica | 7b4d65e31aec3bd7a0e09401b1763cd1e1e590d1 | 4caed51fb54175948edd07010d823b286b7f4236 | refs/heads/master | 2022-11-17T08:48:41.018118 | 2020-07-03T19:13:05 | 2020-07-03T19:13:05 | 262,254,893 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
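# Note: this script appears intended to run inside GridLAB-D's embedded
# Python, which provides the `gridlabd` module at runtime (hence no import
# for it here); on_init and on_term are the lifecycle hooks GridLAB-D calls.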
def find(criteria) :
finder = criteria.split("=")
if len(finder) < 2 :
raise Exception("find(criteria='key=value'): criteria syntax error")
objects = gridlabd.get("objects")
result = []
for name in objects :
item = gridlabd.get_object(name)
if finder[0] in item and item[finder[0]] == finder[1] :
if "name" in item.keys() :
result.append(item["name"])
else :
result.append("%s:%s" % (item["class"],item["id"]))
return result
def on_init(t):
houses = find("class=house")
global recorder
recorder = open("house.csv","w")
recorder.write("name,datetime,temperature\n")
return True
def record_house(name,t) :
global recorder
if recorder :
house = gridlabd.get_object(name)
recorder.write("%s,%s,%s\n" % (house["name"],house["clock"],house["air_temperature"]))
return True
def on_term(t):
recorder.close() | UTF-8 | Python | false | false | 955 | py | 4 | my_test.py | 3 | 0.591623 | 0.587435 | 0 | 33 | 27.909091 | 92 |
bopopescu/Stock_Predictor | 17,575,006,207,099 | e1dd81b061ab9a8e9a15ebf2b11005becd30898d | 6f9b457083216409be606d9903586738504336ad | /env/lib/python2.7/sre_parse.py | c173415e47fe11ff527ea10fff4531f59bd548cb | []
| no_license | https://github.com/bopopescu/Stock_Predictor | ab998a3f83a0968cdc6a97a67298693932ae4537 | bea27ceb06aeebb9d2f45ddf9dd17a6732a456c8 | refs/heads/master | 2022-04-12T10:42:34.313159 | 2018-12-09T05:04:09 | 2018-12-09T05:04:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /Users/jeffreychen/anaconda2/lib/python2.7/sre_parse.py | UTF-8 | Python | true | false | 55 | py | 108 | sre_parse.py | 25 | 0.836364 | 0.781818 | 0 | 1 | 55 | 55 |
listar0810/jupyterlab-snippets-multimenus | 17,712,445,159,228 | 71a220e15c2f5471e39ad1841d9df2f99dcc9bf7 | 9b32771b7d1513ee37bc62dd347675abcfc1bfc9 | /example_snippets/multimenus_snippets/Snippets/NumPy/File IO/Write single array to text file.py | 201b833875f14aa3b988fe7fb756335b515b2511 | [
"BSD-3-Clause"
]
| permissive | https://github.com/listar0810/jupyterlab-snippets-multimenus | 44087ef1aeb030a3074862a337508b57d50072c6 | 477f51cfdbad7409eab45abe53cf774cd70f380c | refs/heads/master | 2022-12-12T18:19:25.221083 | 2020-09-08T01:11:01 | 2020-09-08T01:11:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | np.savetxt(filename, x) | UTF-8 | Python | false | false | 23 | py | 884 | Write single array to text file.py | 881 | 0.782609 | 0.782609 | 0 | 1 | 23 | 23 |
muhadyan/Modul-2-JCDS07 | 15,315,853,398,982 | 7dc1ed5ddc1aaf48cfa39dc275cde1a08b9de9b1 | 93935b1f31257d80222d39113d54ffe06ba1c6fa | /Day_20/.ipynb_checkpoints/Day20Task-checkpoint.py | 372f237dd99e9d99e3b686c39274394248d22fca | []
| no_license | https://github.com/muhadyan/Modul-2-JCDS07 | 5b34beea506d40a788898cfd4016eb81484b8c93 | d6d32e3be6a36d20597f9ef9d04898990002ee01 | refs/heads/master | 2021-01-05T01:03:30.442952 | 2020-02-16T03:12:20 | 2020-02-16T03:12:20 | 240,823,002 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d
df = pd.read_excel('indo_12_1.xls', header=3, index_col=0, skipfooter=3, na_values='-')
fig = plt.figure(figsize=[15,5])
p = plt.subplot(111, projection='3d')
# bar3d needs numeric coordinates, so give each column an integer position
# on the y axis and draw one row of bars per column.
for j, col in enumerate(df):
    x = np.arange(len(df[col]))
    y = np.full(len(df[col]), j)
    z = np.zeros(len(df[col]))
    dx = np.ones(len(df[col]))
    dy = np.ones(len(df[col]))
    dz = df[col]
    p.bar3d(x, y, z, dx, dy, dz)
plt.xticks(np.arange(len(df.index)), df.index, rotation=90)
plt.yticks(np.arange(len(list(df))), list(df), rotation=45)
p.set_zlabel('Z Axis')
plt.show() | UTF-8 | Python | false | false | 660 | py | 76 | Day20Task-checkpoint.py | 31 | 0.60303 | 0.572727 | 0 | 26 | 24.423077 | 87 |
urgemag/urge | 4,483,945,892,610 | 0ab262cc8b6ec8c3cb8211990cfeea8d1223d785 | a00f31c439f81fba6327a43f2cb0aeeace4cdea6 | /flask/views/admin/accesses_add.py | cb94b0df5328f04bfb10f380f9b264897a5a78f6 | [
"MIT"
]
| permissive | https://github.com/urgemag/urge | 183b6579262a2bf5f959ad193734e9da3683f0d7 | a2ca0a9d2253770204dafbbad0cf9538c357d717 | refs/heads/master | 2023-04-21T15:44:45.721486 | 2021-04-26T20:15:47 | 2021-04-26T20:15:47 | 329,867,661 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Blueprint, session, redirect, render_template, request, flash, url_for,abort
from models import PageDetails, Database, General, Authentication
from functools import wraps
from validator_collection import *
import re
admin_add = Blueprint("admin_add", __name__)
@admin_add.route("/Admin", methods=["POST", "GET"])
def admin_index():
return render_template("admin/admin_options.html")
@admin_add.route("/Admin/Add", methods=["POST", "GET"])
def admin_add_index():
return render_template("admin/admin_add_options.html")
@admin_add.route("/Admin/Social", methods=["POST", "GET"])
def admin_social_index():
return render_template("admin/admin_social_options.html")
@admin_add.route("/Admin/Add/User", methods=["POST", "GET"])
def add_user_admin():
""" The Add User Page as an admin. """
message = False
if request.method == "POST":
accesses = []
for course in Database().get_all_slug_and_names_of_courses_from_db():
if (course["Slug"]) == request.form.get(course["Slug"]):
accesses.append(course["Slug"])
message = Database().add_users_data_to_db(
email=request.form.get("email"),
password=General().sha256_hash(request.form.get("password")),
first_name=request.form.get("name"),
last_name=request.form.get("last_name"),
phone_number=request.form.get("phone_num"),
about_me=request.form.get("about_me"),
bio=request.form.get("bio"),
website=request.form.get("web"),
birth_day=request.form.get("day"),
birth_month=request.form.get("month"),
birth_year=request.form.get("year"),
Accesses=accesses,
)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
message["Result"] = message["Message"]
message["Color"] = "red"
flash(message)
return redirect(url_for("admin_add.add_user_admin"))
return render_template(
"admin/admin_add_user.html",
Accesses=PageDetails().all_accesses_admin_page(),
message=message,
)
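# Illustrative sketch (not wired into the routes below): every POST branch in
# this module ends with the same normalize-message, flash and redirect steps,
# which could be factored out like this:
def _flash_and_redirect(message, endpoint):
    if message is True:
        message = {"Color": "green", "Result": "Added successfully."}
    else:
        message["Result"] = message["Message"]
        message["Color"] = "red"
    flash(message)
    return redirect(url_for(endpoint))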
@admin_add.route("/Admin/Add/Day", methods=["POST", "GET"])
def add_day_options_admin():
""" The Add options Page as an admin. """
return render_template(
"admin/admin_add_day_options.html",
)
@admin_add.route("/Admin/Add/Day/Essential", methods=["POST", "GET"])
def add_day_essential_main_data_admin():
""" The Add Essential Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
slug = request.form.get("slug")
day = request.form.get("day")
name_day = request.form.get("name_day")
description_status_existence = request.form.get("description-status")
description = request.form.get("description")
price_status = request.form.get("price-status")
            if not is_not_empty(slug):
                return {"Result": False, "Message": "Enter the course's English name."}
            if not is_not_empty(day):
                return {"Result": False, "Message": "Enter the course day."}
            if not is_not_empty(name_day):
                return {"Result": False, "Message": "Enter the day's name."}
            if description_status_existence == "With-Description" and not is_not_empty(description):
                return {"Result": False, "Message": "Enter the day's description."}
            if price_status == "None-Free":
                freeness = False
            else:
                freeness = True
            if not is_numeric(day):
                return {"Result": False, "Message": "The course day must be a number."}
            try:
                uploaded_file = request.files["cover"]
            except:
                return {"Result": False, "Message": "Upload the image."}
            if Database().get_courses_data_from_db(request.form.get("slug")) is False:
                return {"Result": False, "Message": "No such course exists."}
uploaded_image = request.files.get("cover")
uploaded_image_bytes = uploaded_image.read()
hash_image = General().sha256_hash_bytes(uploaded_image_bytes)
format_file = General().format_recognizer(uploaded_image_bytes)
file_name = "cover_of_day_of_course_" + str(hash_image) + "." + format_file
if General().check_existence_of_a_file("static/assets/courses/{slug}/days/{day}".format(slug=slug,day=day)) is False:
General().setup_course_folder(slug)
location_image = "static/assets/courses/{slug}/days/{day}/{file_name}".format(slug=slug,day=day, file_name=file_name)
location_image_href = "/static//assets/courses/{slug}/days/{day}/{file_name}".format(slug=slug,day=day, file_name=file_name)
with open(location_image, "wb") as file:
file.write(uploaded_image_bytes)
General().image_resizer_using_imgp(location_image, 700)
General().image_optimizer_using_imgp(location_image)
message = Database().add_day_essential_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
name_persian=request.form.get("name_day"),
description=request.form.get("description"),
image_path=location_image_href,
freeness=freeness,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_essential_main_data_admin"))
return render_template(
"admin/admin_add_day_essential.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Day/Text", methods=["POST", "GET"])
def add_day_text_data_admin():
""" The Add Main Text Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
text = request.form.get("text")
day = request.form.get("day")
slug = request.form.get("slug")
if slug == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
if text == "":
return {"Result": False, "Message": "متن اصلی را وارد کنید."}
try:
int(day)
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(slug) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
links_images = re.findall("src=[\"\'](.*?)[\"\']", text)
if General().check_existence_of_a_file("static/assets/courses/{slug}/days/{day}".format(slug=slug,day=day)) is False:
General().setup_course_folder(slug)
for link in links_images:
file_path = General().href_to_path(link)
file_name = (file_path.split("/"))[-1]
new_file_path = "static/assets/courses/{slug}/days/{day_number}/{file_name}".format(slug=slug,day_number=day, file_name=file_name)
new_file_href = "/static//assets/courses/{slug}/days/{day_number}/{file_name}".format(slug=slug,day_number=day, file_name=file_name)
General().move_file_path(file_path, new_file_path)
text = text.replace(link, new_file_href)
message = Database().add_day_text_data_to_db(
course_name_slug=slug,
day_num=day,
text=text,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_text_data_admin"))
return render_template(
"admin/admin_add_day_text.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Day/Todo", methods=["POST", "GET"])
def add_day_todo_data_admin():
""" The Add Todo-Excersices Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
if request.form.get("todo") == "":
return {"Result": False, "Message": "تمارین را وارد کنید."}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().add_day_todo_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
todo=request.form.get("todo"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_text_data_admin"))
return render_template(
"admin/admin_add_day_todo.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Day/Quotes", methods=["POST", "GET"])
def add_day_quotes_data_admin():
""" The Add Quotes Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
if list(filter(None, request.form.getlist("quote"))) == []:
return {"Result": False, "Message": "نقل قول ها را وارد کنید."}
quotes = list(filter(None, request.form.getlist("quote")))
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().add_day_quotes_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
quotes=quotes,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_quotes_data_admin"))
return render_template(
"admin/admin_add_day_quotes.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Day/Music", methods=["POST", "GET"])
def add_day_music_data_admin():
""" The Add Music Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
covers = request.files.getlist("cover")
musics = request.files.getlist("music")
creators = request.form.getlist("creator")
names = request.form.getlist("music_name")
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
if request.form.get("description") == "":
return {"Result": False, "Message": "توضیحات موزیک را وارد کنید."}
for music in musics:
if music.filename == "":
musics.remove(music)
for cover in covers:
if cover.filename == "":
covers.remove(cover)
for name in names:
if name == "":
names.remove(name)
for creator in creators:
if creator == "":
creators.remove(creator)
if creators == [] or names == []:
return {"Result": False, "Message": "اطلاعات موزیک را وارد کنید."}
if musics == [] or covers == []:
return {
"Result": False,
"Message": "مدیاهای مربوط به موزیک را وارد کنید.",
}
if not (
len(covers) == len(musics)
and len(musics) == len(creators)
and len(creators) == len(names)
):
return {"Result": False, "Message": "همه فیلد های موزیک را وارد کنید."}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().add_day_musics_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
description=request.form.get("description"),
covers=covers,
musics=musics,
creators=creators,
names=names,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_music_data_admin"))
return render_template(
"admin/admin_add_day_music.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Day/Ted", methods=["POST", "GET"])
def add_day_ted_data_admin():
""" The Add TED video Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
urls = request.form.getlist("ted_url")
qualities = request.form.getlist("ted_quality")
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
if request.form.get("description") == "":
return {"Result": False, "Message": "توضیحات ویدیوها را وارد کنید."}
for url in urls:
if url == "":
urls.remove(url)
for quality in qualities:
if quality == "":
qualities.remove(quality)
if urls == [] or qualities == []:
return {"Result": False, "Message": "لینک و یا کیفیت ها را وارد کنید."}
if not (len(urls) == len(qualities)):
return {"Result": False, "Message": "همه فیلد های ویدیو را وارد کنید."}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().add_day_ted_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
description=request.form.get("description"),
urls=urls,
qualities=qualities,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_ted_data_admin"))
return render_template(
"admin/admin_add_day_ted.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Day/Animation", methods=["POST", "GET"])
def add_day_animation_data_admin():
""" The Add short Animation film Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
urls = request.form.getlist("animation_url")
qualities = request.form.getlist("animation_quality")
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
if request.form.get("description") == "":
return {"Result": False, "Message": "توضیحات ویدیو را وارد کنید."}
for url in urls:
if url == "":
urls.remove(url)
for quality in qualities:
if quality == "":
qualities.remove(quality)
if urls == [] or qualities == []:
return {"Result": False, "Message": "لینک و یا کیفیت ها را وارد کنید."}
if not (len(urls) == len(qualities)):
return {
"Result": False,
"Message": "همه فیلد های انیمیشن را وارد کنید.",
}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().add_day_animation_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
description=request.form.get("description"),
urls=urls,
qualities=qualities,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_animation_data_admin"))
return render_template(
"admin/admin_add_day_animation.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Day/Podcast", methods=["POST", "GET"])
def add_day_podcast_data_admin():
""" The Add podcast Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
creator = request.form.get("creator")
name = request.form.get("podcast_name")
cover = request.files.get("cover")
url = request.form.get("podcast_url")
if creator == "" or name == "":
return {"Result": False, "Message": "اطلاعات پادکست را وارد کنید."}
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
if request.form.get("description") == "":
return {"Result": False, "Message": "توضیحات پادکست را وارد کنید."}
if url == "":
return {"Result": False, "Message": "لینک پادکست را وارد کنید."}
if cover.filename == "":
return {"Result": False, "Message": "کاور پادکست را وارد کنید."}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().add_day_podcast_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
description=request.form.get("description"),
url=url,
cover=cover,
creator=creator,
name=name,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_podcast_data_admin"))
return render_template(
"admin/admin_add_day_podcast.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Day/Movie", methods=["POST", "GET"])
def add_day_movie_data_admin():
""" The Add Movie Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
cover = request.files.get("cover")
urls = request.form.getlist("movie_url")
qualities = request.form.getlist("movie_quality")
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
if request.form.get("description") == "":
return {"Result": False, "Message": "توضیحات پادکست را وارد کنید."}
if cover.filename == "":
return {"Result": False, "Message": "کاور پادکست را وارد کنید."}
for url in urls:
if url == "":
urls.remove(url)
for quality in qualities:
if quality == "":
qualities.remove(quality)
if urls == [] or qualities == []:
return {"Result": False, "Message": "لینک و یا کیفیت ها را وارد کنید."}
if not (len(urls) == len(qualities)):
return {"Result": False, "Message": "همه فیلد ها را وارد کنید."}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().add_day_movie_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
description=request.form.get("description"),
urls=urls,
qualities=qualities,
cover=cover,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_day_movie_data_admin"))
return render_template(
"admin/admin_add_day_movie.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_add.route("/Admin/Add/Course", methods=["POST", "GET"])
def add_course_admin():
""" The Add Course Page as an admin. """
if request.method == "POST":
def form_course(request):
if request.form.get("slug") == "":
return {"Message": "نام انگلیسی دوره را وارد کنید."}
slug = request.form.get("slug").replace(" ","-")
try:
uploaded_file = request.files["cover"]
except:
return {"Message": "تصویر را آپلود کنید."}
try:
length_of_course = int(request.form.get("len"))
except ValueError:
return {"Color": "red", "Message": "طول دوره باید عدد باشد."}
if (
Database().get_courses_data_from_db(slug) != ""
and uploaded_file.filename == ""
):
return {"Message": "تصویر را آپلود کنید."}
result_pic = General().save_picture_of_course(
slug, uploaded_file, length_of_course
)
if result_pic["Result"] is False:
return result_pic
if request.form.get("price-status") == "Free":
price = "0"
free = True
else:
price = request.form.get("price")
free = False
            try:
                if request.form.get("soon") == "Soon":
                    days_till_open = int(request.form.get("date_open"))
                else:
                    days_till_open = 0
            except ValueError:
                General().remove_file(result_pic["path"])
                return {"Color": "red", "Message": "The days until opening must be a number."}
            try:
                int(price.replace(",", ""))
            except ValueError:
                General().remove_file(result_pic["path"])
                return {"Color": "red", "Message": "The course price must be a number."}
message = Database().add_course_data_to_db(
name=request.form.get("name"),
slug=slug,
description=request.form.get("description"),
intro=None,
image_href=result_pic["href"],
now_price=int(price.replace(",", "")),
length_of_course=int(request.form.get("len")),
robbin=request.form.get("robbin"),
free=free,
days_till_publish=days_till_open
)
if message is not True:
General().remove_file(result_pic["path"])
return message
message = form_course(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
message["Result"] = message["Message"]
message["Color"] = "red"
flash(message)
return redirect(url_for("admin_add.add_course_admin"))
return render_template(
"admin/admin_add_course.html",
)
@admin_add.route("/Admin/Add/Course-Info", methods=["POST", "GET"])
def add_course_info_admin():
""" The Add Course information Page as an admin. """
if request.method == "POST":
message = Database().add_course_info_to_db(
slug=request.form.get("slug"),
introduction=request.form.get("intro"),
speciality=request.form.get("speciality"),
importance=request.form.get("importance"),
why=request.form.get("why"),
length=request.form.get("length"),
price=request.form.get("price"),
last_words=request.form.get("last_word"),
)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
message["Result"] = message["Message"]
message["Color"] = "red"
flash(message)
return redirect(url_for("admin_add.add_course_info_admin"))
return render_template(
"admin/admin_add_course_info.html",
Courses=PageDetails().get_all_courses_info_categorized_by_info_existence(),
)
@admin_add.route("/Admin/Add/Post", methods=["POST", "GET"])
def add_post_blog_admin():
""" The Add a Post for blog Page as an admin. """
if request.method == "POST":
def form_handler(request):
text = request.form.get("text")
slug = (request.form.get("name_eng").replace(" ","-")).replace("_","-")
if request.form.get("name_persian") == "":
return {"Result": False, "Message": "نام فارسی دوره را وارد کنید."}
if slug == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if text == "":
return {"Result": False, "Message": "متن اصلی را وارد کنید."}
uploaded_file = request.files["cover"]
if uploaded_file.filename == "":
return {"Message": "تصویر را آپلود کنید."}
english_name = slug
uploaded_image = request.files.get("cover")
uploaded_image_bytes = uploaded_image.read()
format_file = General().format_recognizer(uploaded_image_bytes)
General().setup_blog_post_folder(slug)
file_name = "blog-cover_" + english_name + "." + format_file
location_image = "static/assets/images/blog/{}/".format(slug) + file_name
location_image_href = "/static//assets/images/blog/{}/".format(slug) + file_name
with open(location_image, "wb") as file:
file.write(uploaded_image_bytes)
General().image_resizer_using_imgp(location_image, 1500)
General().image_optimizer_using_imgp(location_image)
links_images = re.findall("src=[\"\'](.*?)[\"\']", text)
for link in links_images:
file_path = General().href_to_path(link)
file_name = (file_path.split("/"))[-1]
new_file_path = "static/assets/images/blog/{slug}/{file_name}".format(slug=slug, file_name=file_name)
new_file_href = "/static//assets/images/blog/{slug}/{file_name}".format(slug=slug, file_name=file_name)
General().move_file_path(file_path, new_file_path)
text = text.replace(link, new_file_href)
message = Database().add_post_blog_to_db(
persian_name=request.form.get("name_persian"),
eng_name=english_name,
cover_href=location_image_href,
text=text,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_post_blog_admin"))
return render_template(
"admin/admin_add_post.html",
)
@admin_add.route("/Admin/Add/Music", methods=["POST", "GET"])
def add_music_admin():
""" The Add Music as an admin. """
if request.method == "POST":
def form_handler(request):
cover = request.files.get("cover")
music = request.files.get("music")
creator = request.form.get("creator")
name = request.form.get("music_name")
if music.filename == "":
return {"Result": False, "Message": "موزیک را اپلود کنید."}
if cover.filename == "":
return {"Result": False, "Message": "کاور را اپلود کنید."}
if name == "" or creator == "":
return {"Result": False, "Message": "اطلاعات موزیک را وارد کنید."}
message = Database().add_music_data_to_db(
cover=cover,
music=music,
creator=creator,
name=name,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_add.add_music_admin"))
return render_template(
"admin/admin_add_music.html",
)
@admin_add.route("/Admin/Add/tool", methods=["POST", "GET"])
@admin_add.route("/Admin/Add/Tool", methods=["POST", "GET"])
def add_tool_admin():
""" The Add Tool Page as an admin. """
if request.method == "POST":
def form_course(request):
slug = request.form.get("slug").replace(" ","-")
if slug == "":
return {"Message": "نام انگلیسی را وارد کنید."}
try:
uploaded_file = request.files["cover"]
except:
return {"Message": "تصویر را آپلود کنید."}
if (
Database().get_courses_data_from_db(slug) != ""
and uploaded_file.filename == ""
):
return {"Message": "تصویر را آپلود کنید."}
result_pic = General().save_picture_of_tool(slug, uploaded_file)
if result_pic["Result"] is False:
return result_pic
if request.form.get("price-status") == "Free":
price = "0"
else:
price = request.form.get("price")
            try:
                if request.form.get("soon") == "Soon":
                    days_till_open = int(request.form.get("date_open"))
                else:
                    days_till_open = 0
            except ValueError:
                General().remove_file(result_pic["path"])
                return {"Color": "red", "Message": "The days until opening must be a number."}
            try:
                int(price.replace(",", ""))
            except ValueError:
                General().remove_file(result_pic["path"])
                return {"Color": "red", "Message": "The price must be a number."}
message = Database().add_tool_to_db(
persian_name=request.form.get("name"),
slug=slug,
description=request.form.get("description"),
image_href=result_pic["href"],
now_price=int(price.replace(",", "")),
robbin=request.form.get("robbin"),
price=price,
days_till_publish=days_till_open
)
if message is not True:
General().remove_file(result_pic["path"])
return message
message = form_course(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
message["Result"] = message["Message"]
message["Color"] = "red"
flash(message)
return redirect(url_for("admin_add.add_tool_admin"))
return render_template(
"admin/admin_add_tool.html",
)
@admin_add.route("/Admin/Generate/Post", methods=["POST", "GET"])
def generate_post():
return render_template(
"admin/admin_generate_instagram_post.html",
templates=General().open_json_file("static/assets/instagram/posts.json")
)
| UTF-8 | Python | false | false | 39,273 | py | 78 | accesses_add.py | 19 | 0.525702 | 0.525195 | 0 | 1,000 | 36.468 | 148 |
L-sky/Master_Thesis | 11,235,634,489,777 | 9d1ae6a18ee2d19298f41e50bde3e0c4327e3af8 | 11e58f5519b17b8a9bee0336c1e0f10999f637ba | /elastic_tensor/elastic_coef.py | 3a5c45ba52f5d55640298bb8a5300aca2d7e87d5 | [
"MIT"
]
| permissive | https://github.com/L-sky/Master_Thesis | 57e646225edffcc614ebc278324ef7616838c110 | 609e3b1c81dfb2e13d86df106d81e2e56d32488d | refs/heads/master | 2022-12-25T05:13:39.040748 | 2020-10-10T21:39:09 | 2020-10-10T21:39:09 | 259,278,932 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sympy
from sympy import sqrt
from elastic_tensor.rgaunt import rgaunt_p as rgaunt
from functools import partial
common = rgaunt(0, 0, 0, 0, 0, 0)
rgaunt_011 = partial(rgaunt, l1=0, l2=1, l3=1)
rgaunt_211 = partial(rgaunt, l1=2, l2=1, l3=1)
rgaunt_022 = partial(rgaunt, l1=0, l2=2, l3=2)
rgaunt_222 = partial(rgaunt, l1=2, l2=2, l3=2)
rgaunt_422 = partial(rgaunt, l1=4, l2=2, l3=2)
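# Each partial pins the degrees (l1, l2, l3) of the real Gaunt coefficient;
# the remaining positional arguments supply the orders (m1, m2, m3), so e.g.
# rgaunt_211(m, alpha, beta) couples an l=2, order-m component with two l=1
# components of orders alpha and beta.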
def A(alpha, beta, gamma, delta):
return rgaunt_011(0, alpha, beta) * rgaunt_011(0, gamma, delta) * common
def B(alpha, beta, gamma, delta, m):
B_value = rgaunt_022(0, m, m) * (rgaunt_011(0, alpha, beta) * rgaunt_211(m, gamma, delta) + rgaunt_011(0, gamma, delta) * rgaunt_211(m, alpha, beta))
return B_value
def D(alpha, beta, gamma, delta):
quintet = [-2, -1, 0, +1, +2]
D_value = 0
for mp in quintet:
D_value += rgaunt_211(mp, alpha, beta) * rgaunt_211(mp, gamma, delta) * rgaunt_022(0, mp, mp)
return D_value
def E(alpha, beta, gamma, delta, m):
quintet = [-2, -1, 0, +1, +2]
E_value = 0
for m1 in quintet:
for m2 in quintet:
E_value += rgaunt_211(m1, alpha, beta) * rgaunt_211(m2, gamma, delta) * rgaunt_222(m, m1, m2)
return E_value
def H(alpha, beta, gamma, delta, m):
quintet = [-2, -1, 0, +1, +2]
H_value = 0
for m1 in quintet:
for m2 in quintet:
H_value += rgaunt_211(m1, alpha, beta) * rgaunt_211(m2, gamma, delta) * rgaunt_422(m, m1, m2)
return H_value
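# A pairs two l=0 couplings; B mixes an l=0 coupling with an l=2, order-m one;
# D, E and H contract two l=2 couplings against the l=0, the l=2 (order m)
# and the l=4 (order m) Gaunt coefficients respectively. Counting the five
# B/E orders and nine H orders, this gives the 21 spherical functions used
# below, matching the 21 independent components of an elastic tensor.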
def sph_cov_mat():
B_n2 = partial(B, m=-2)
B_n1 = partial(B, m=-1)
B_0 = partial(B, m=0)
B_p1 = partial(B, m=+1)
B_p2 = partial(B, m=+2)
E_n2 = partial(E, m=-2)
E_n1 = partial(E, m=-1)
E_0 = partial(E, m=0)
E_p1 = partial(E, m=+1)
E_p2 = partial(E, m=+2)
H_n4 = partial(H, m=-4)
H_n3 = partial(H, m=-3)
H_n2 = partial(H, m=-2)
H_n1 = partial(H, m=-1)
H_0 = partial(H, m=0)
H_p1 = partial(H, m=+1)
H_p2 = partial(H, m=+2)
H_p3 = partial(H, m=+3)
H_p4 = partial(H, m=+4)
sph_func = [A,
B_n2, B_n1, B_0, B_p1, B_p2,
D,
E_n2, E_n1, E_0, E_p1, E_p2,
H_n4, H_n3, H_n2, H_n1, H_0, H_p1, H_p2, H_p3, H_p4]
mat = sympy.zeros(81, 21)
triplet = [-1, 0, +1]
for idx in range(81):
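        # idx enumerates all 3^4 Cartesian index quadruplets; its base-3
        # digits, shifted from {0, 1, 2} to {-1, 0, +1}, give alpha, beta,
        # gamma and delta.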
alpha_p = idx // 27
beta_p = (idx % 27) // 9
gamma_p = (idx % 9) // 3
delta_p = idx % 3
alpha = alpha_p - 1
beta = beta_p - 1
gamma = gamma_p - 1
delta = delta_p - 1
for j, sph in enumerate(sph_func):
mat[idx, j] = sph(alpha, beta, gamma, delta)
return mat
def sph_cov_mat_sub():
B_n2 = partial(B, m=-2)
B_n1 = partial(B, m=-1)
B_0 = partial(B, m=0)
B_p1 = partial(B, m=+1)
B_p2 = partial(B, m=+2)
E_n2 = partial(E, m=-2)
E_n1 = partial(E, m=-1)
E_0 = partial(E, m=0)
E_p1 = partial(E, m=+1)
E_p2 = partial(E, m=+2)
H_n4 = partial(H, m=-4)
H_n3 = partial(H, m=-3)
H_n2 = partial(H, m=-2)
H_n1 = partial(H, m=-1)
H_0 = partial(H, m=0)
H_p1 = partial(H, m=+1)
H_p2 = partial(H, m=+2)
H_p3 = partial(H, m=+3)
H_p4 = partial(H, m=+4)
sph_func = [A,
B_n2, B_n1, B_0, B_p1, B_p2,
D,
E_n2, E_n1, E_0, E_p1, E_p2,
H_n4, H_n3, H_n2, H_n1, H_0, H_p1, H_p2, H_p3, H_p4]
mat = sympy.zeros(21, 21)
quadruplets = [(-1, -1, -1, -1), (-1, -1, -1, 0), (-1, -1, -1, +1), (-1, -1, 0, 0), (-1, -1, 0, +1), (-1, -1, +1, +1),
(-1, 0, -1, 0), (-1, 0, -1, +1), (-1, 0, 0, 0), (-1, 0, 0, +1), (-1, 0, +1, +1),
(-1, +1, -1, +1), (-1, +1, 0, 0), (-1, +1, 0, +1), (-1, +1, +1, +1),
(0, 0, 0, 0), (0, 0, 0, +1), (0, 0, +1, +1),
(0, +1, 0, +1), (0, +1, +1, +1),
(+1, +1, +1, +1)]
for idx, quadruplet in enumerate(quadruplets):
for j, sph in enumerate(sph_func):
mat[idx, j] = sph(*quadruplet)
return mat
def reconstruction_coef():
T_scaled = sympy.Matrix([
[1, 0, 0, -2 * sqrt(5) / 5, 0, -2 * sqrt(15) / 5, 4 / 5, 0, 0, -4 * sqrt(5) / 35, 0, -4 * sqrt(15) / 35, 0, 0, 0, 0, 9 / 35, 0, 6 * sqrt(5) / 35, 0, 3 * sqrt(35) / 35],
[0, 0, sqrt(15) / 5, 0, 0, 0, 0, 0, 2 * sqrt(15) / 35, 0, 0, 0, 0, -3 * sqrt(70) / 70, 0, -9 * sqrt(10) / 70, 0, 0, 0, 0, 0],
[0, sqrt(15) / 5, 0, 0, 0, 0, 0, 2 * sqrt(15) / 35, 0, 0, 0, 0, -3 * sqrt(35) / 35, 0, -3 * sqrt(5) / 35, 0, 0, 0, 0, 0, 0],
[1, 0, 0, sqrt(5) / 5, 0, -sqrt(15) / 5, -2 / 5, 0, 0, -4 * sqrt(5) / 35, 0, 4 * sqrt(15) / 35, 0, 0, 0, 0, -12 / 35, 0, -6 * sqrt(5) / 35, 0, 0],
[0, 0, 0, 0, sqrt(15) / 5, 0, 0, 0, 0, 0, -4 * sqrt(15) / 35, 0, 0, 0, 0, 0, 0, -3 * sqrt(10) / 70, 0, -3 * sqrt(70) / 70, 0],
[1, 0, 0, -2 * sqrt(5) / 5, 0, 0, -2 / 5, 0, 0, 8 * sqrt(5) / 35, 0, 0, 0, 0, 0, 0, 3 / 35, 0, 0, 0, -3 * sqrt(35) / 35],
[0, 0, 0, 0, 0, 0, 3 / 5, 0, 0, 3 * sqrt(5) / 35, 0, -3 * sqrt(15) / 35, 0, 0, 0, 0, -12 / 35, 0, -6 * sqrt(5) / 35, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 * sqrt(15) / 35, 0, 0, 0, 0, 0, 0, -3 * sqrt(10) / 70, 0, -3 * sqrt(70) / 70, 0],
[0, 0, sqrt(15) / 5, 0, 0, 0, 0, 0, 2 * sqrt(15) / 35, 0, 0, 0, 0, 0, 0, 6 * sqrt(10) / 35, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 3 * sqrt(15) / 35, 0, 0, 0, 0, 0, 0, 6 * sqrt(5) / 35, 0, 0, 0, 0, 0, 0],
[0, 0, sqrt(15) / 5, 0, 0, 0, 0, 0, -4 * sqrt(15) / 35, 0, 0, 0, 0, 3 * sqrt(70) / 70, 0, -3 * sqrt(10) / 70, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 3 / 5, 0, 0, -6 * sqrt(5) / 35, 0, 0, 0, 0, 0, 0, 3 / 35, 0, 0, 0, -3 * sqrt(35) / 35],
[0, sqrt(15) / 5, 0, 0, 0, 0, 0, -4 * sqrt(15) / 35, 0, 0, 0, 0, 0, 0, 6 * sqrt(5) / 35, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 3 * sqrt(15) / 35, 0, 0, 0, 0, 3 * sqrt(70) / 70, 0, -3 * sqrt(10) / 70, 0, 0, 0, 0, 0],
[0, sqrt(15) / 5, 0, 0, 0, 0, 0, 2 * sqrt(15) / 35, 0, 0, 0, 0, 3 * sqrt(35) / 35, 0, -3 * sqrt(5) / 35, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 4 * sqrt(5) / 5, 0, 0, 4 / 5, 0, 0, 8 * sqrt(5) / 35, 0, 0, 0, 0, 0, 0, 24 / 35, 0, 0, 0, 0],
[0, 0, 0, 0, sqrt(15) / 5, 0, 0, 0, 0, 0, 2 * sqrt(15) / 35, 0, 0, 0, 0, 0, 0, 6 * sqrt(10) / 35, 0, 0, 0],
[1, 0, 0, sqrt(5) / 5, 0, sqrt(15) / 5, -2 / 5, 0, 0, -4 * sqrt(5) / 35, 0, -4 * sqrt(15) / 35, 0, 0, 0, 0, -12 / 35, 0, 6 * sqrt(5) / 35, 0, 0],
[0, 0, 0, 0, 0, 0, 3 / 5, 0, 0, 3 * sqrt(5) / 35, 0, 3 * sqrt(15) / 35, 0, 0, 0, 0, -12 / 35, 0, 6 * sqrt(5) / 35, 0, 0],
[0, 0, 0, 0, sqrt(15) / 5, 0, 0, 0, 0, 0, 2 * sqrt(15) / 35, 0, 0, 0, 0, 0, 0, -9 * sqrt(10) / 70, 0, 3 * sqrt(70) / 70, 0],
[1, 0, 0, -2 * sqrt(5) / 5, 0, 2 * sqrt(15) / 5, 4 / 5, 0, 0, -4 * sqrt(5) / 35, 0, 4 * sqrt(15) / 35, 0, 0, 0, 0, 9 / 35, 0, -6 * sqrt(5) / 35, 0, 3 * sqrt(35) / 35]
])
return T_scaled
def deconstruction_coef():
T_inv_scaled_transposed = sympy.Matrix([
[1 / 9, 0, 0, -sqrt(5) / 18, 0, -sqrt(15) / 18, 1 / 9, 0, 0, -sqrt(5) / 18, 0, -sqrt(15) / 18, 0, 0, 0, 0, 1 / 8, 0, sqrt(5) / 12, 0, sqrt(35) / 24],
[0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, -sqrt(70) / 12, 0, -sqrt(10) / 4, 0, 0, 0, 0, 0],
[0, sqrt(15) / 9, 0, 0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, -sqrt(35) / 6, 0, -sqrt(5) / 6, 0, 0, 0, 0, 0, 0],
[2 / 9, 0, 0, sqrt(5) / 18, 0, -sqrt(15) / 18, -1 / 9, 0, 0, -sqrt(5) / 9, 0, sqrt(15) / 9, 0, 0, 0, 0, -1 / 3, 0, -sqrt(5) / 6, 0, 0],
[0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, -2 * sqrt(15) / 9, 0, 0, 0, 0, 0, 0, -sqrt(10) / 12, 0, -sqrt(70) / 12, 0],
[2 / 9, 0, 0, -sqrt(5) / 9, 0, 0, -1 / 9, 0, 0, 2 * sqrt(5) / 9, 0, 0, 0, 0, 0, 0, 1 / 12, 0, 0, 0, -sqrt(35) / 12],
[0, 0, 0, 0, 0, 0, 1 / 3, 0, 0, sqrt(5) / 6, 0, -sqrt(15) / 6, 0, 0, 0, 0, -2 / 3, 0, -sqrt(5) / 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(15) / 3, 0, 0, 0, 0, 0, 0, -sqrt(10) / 6, 0, -sqrt(70) / 6, 0],
[0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, 0, sqrt(10) / 3, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, sqrt(15) / 3, 0, 0, 0, 0, 0, 0, 2 * sqrt(5) / 3, 0, 0, 0, 0, 0, 0],
[0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, -2 * sqrt(15) / 9, 0, 0, 0, 0, sqrt(70) / 12, 0, -sqrt(10) / 12, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1 / 3, 0, 0, -sqrt(5) / 3, 0, 0, 0, 0, 0, 0, 1 / 6, 0, 0, 0, -sqrt(35) / 6],
[0, sqrt(15) / 9, 0, 0, 0, 0, 0, -2 * sqrt(15) / 9, 0, 0, 0, 0, 0, 0, sqrt(5) / 3, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, sqrt(15) / 3, 0, 0, 0, 0, sqrt(70) / 6, 0, -sqrt(10) / 6, 0, 0, 0, 0, 0],
[0, sqrt(15) / 9, 0, 0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, sqrt(35) / 6, 0, -sqrt(5) / 6, 0, 0, 0, 0, 0, 0],
[1 / 9, 0, 0, sqrt(5) / 9, 0, 0, 1 / 9, 0, 0, sqrt(5) / 9, 0, 0, 0, 0, 0, 0, 1 / 3, 0, 0, 0, 0],
[0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, 0, sqrt(10) / 3, 0, 0, 0],
[2 / 9, 0, 0, sqrt(5) / 18, 0, sqrt(15) / 18, -1 / 9, 0, 0, -sqrt(5) / 9, 0, -sqrt(15) / 9, 0, 0, 0, 0, -1 / 3, 0, sqrt(5) / 6, 0, 0],
[0, 0, 0, 0, 0, 0, 1 / 3, 0, 0, sqrt(5) / 6, 0, sqrt(15) / 6, 0, 0, 0, 0, -2 / 3, 0, sqrt(5) / 3, 0, 0],
[0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, sqrt(15) / 9, 0, 0, 0, 0, 0, 0, -sqrt(10) / 4, 0, sqrt(70) / 12, 0],
[1 / 9, 0, 0, -sqrt(5) / 18, 0, sqrt(15) / 18, 1 / 9, 0, 0, -sqrt(5) / 18, 0, sqrt(15) / 18, 0, 0, 0, 0, 1 / 8, 0, -sqrt(5) / 12, 0, sqrt(35) / 24]
])
return T_inv_scaled_transposed.T
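# Minimal usage sketch (assumes elastic_tensor.rgaunt is importable, as above):
# build the 21x21 contraction matrix and the two scaled change-of-basis matrices.
if __name__ == "__main__":
    M = sph_cov_mat_sub()
    T = reconstruction_coef()
    T_inv = deconstruction_coef()
    print(M.shape, T.shape, T_inv.shape)  # expected: (21, 21) three times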
| UTF-8 | Python | false | false | 9,563 | py | 32 | elastic_coef.py | 23 | 0.419847 | 0.242183 | 0 | 200 | 46.815 | 176 |
futureneer/simple_ur | 9,964,324,128,182 | e34057714ee618b349b02b506e543ce2badf67db | e3cf50c77945d624bd22b5a2f8fcea8c1f786311 | /simple_ur_driver/nodes/simple_urx_rt_example.py | a768e380b61e0191e84c3ef9e038ed9ee3722bb8 | [
"BSD-2-Clause"
]
| permissive | https://github.com/futureneer/simple_ur | 9a8f85a18d907c6193ccae682c583a06f153996a | 0004c6450c70f3ceb06e0e96f26dde8479012c28 | refs/heads/master | 2016-09-15T06:28:53.478621 | 2015-06-27T03:56:33 | 2015-06-27T03:56:33 | 22,484,196 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import rospy
import numpy as np
from copy import deepcopy
import urx
import logging
import socket
import struct
if __name__ == "__main__":
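    # NOTE: rospy.sleep() normally needs ROS time initialized via
    # rospy.init_node(); if this standalone script fails there, add an
    # init_node call or fall back to time.sleep().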
    # rob = urx.Robot("192.168.1.155", logLevel=logging.INFO)
    print 'skipping urx connection; talking to the realtime port directly'
sock = socket.create_connection(('192.168.1.155', 30003), timeout=0.5)
print 'created socket'
# if rob:
# realtime_monitor = rob.get_realtime_monitor()
# print 'created realtime monitor connection'
rospy.sleep(1)
while not rospy.is_shutdown():
sock.send(struct.pack("!i", 2))
rospy.sleep(.1)
print 'sending'
# for n in range(500):
# vels = [0.0, 0.0, 0.0, 0.0, -0.5, 0.0]
# acc = .1
# vels.append(acc)
# timeout = .01
# vels.append(timeout)
# prog = "speedl([{},{},{},{},{},{}], a={}, t_min={})\n".format(*vels)
# # prog = "textmsg({})\n".format(n)
# if type(prog) != bytes:
# prog = prog.encode()
# # print 'sending command ['+str(prog)+']'
# sock.send(prog)
# # print realtime_monitor.get_all_data(wait=False)['tcp'][2]
# # sock.recv(1044)
# rospy.sleep(.007)
# rospy.sleep(1)
sock.close()
# rob.cleanup() | UTF-8 | Python | false | false | 1,259 | py | 19 | simple_urx_rt_example.py | 12 | 0.544877 | 0.499603 | 0 | 44 | 27.636364 | 78 |
WillWagnerIV/Python-and-Data-Science | 13,073,880,479,252 | 0790878db4832faff9b8cd5f5dca61ea5a6525ad | 9ba84a4f815b217d7f62d63f0c340917f02a4a33 | /recipes/hw6pr2.py | ba388fd04b8ea9f1719c84b2511bcd165852ce61 | []
| no_license | https://github.com/WillWagnerIV/Python-and-Data-Science | e69bfd75ee512d429df687e4be938ca7aa80b46a | 259f5c48065275948efe6f3c368c77b82fe4c963 | refs/heads/master | 2020-03-22T06:33:53.738606 | 2018-07-04T00:10:21 | 2018-07-04T00:10:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# starting examples for cs35, week1 "Web as Input"
#
import requests
import string
import json
"""
Examples to run for problem 1:
Web scraping, the basic command (Thanks, Prof. Medero!)
#
# basic use of requests:
#
url = "https://www.cs.hmc.edu/~dodds/demo.html" # try it + source
result = requests.get(url)
text = result.text # provides the source as a large string...
#
# try it for another site...
#
#
# let's try the Google Maps Distance Matrix API -- it also provides JSON-formatted data
# See the API's webpage for details and allowable use
#
# Try this one by hand - what are its parts?
# http://maps.googleapis.com/maps/api/distancematrix/json?origins=%22Claremont,%20CA%22&destinations=%22Seattle,%20WA%22&mode=%22walking%22
#
# Take a look at the result -- perhaps using this nice site for editing + display:
#
# A nice site for json display and editing: https://jsoneditoronline.org/
#
#
"""
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Problem 1
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# example of calling the google distance API
#
def google_api(Sources, Dests, Mode):
""" Inputs: Sources is a list of starting places
Dests is a list of ending places
This function uses Google's distances API to find the distances
from all Sources to all Dests.
It saves the result to distances.json
Problem: right now it only works with the FIRST of each list!
"""
print("Start of google_api")
url="http://maps.googleapis.com/maps/api/distancematrix/json"
if len(Sources) < 1 or len(Dests) < 1:
print("Sources and Dests need to be lists of >= 1 city each!")
return
start = Sources[0]
end = Dests[0]
    my_mode = Mode  # valid API modes: driving, walking, bicycling, transit
for z in range(1,len(Sources)):
start += "|"
start += Sources[z]
for z in range(1,len(Dests)):
end += "|"
end += Dests[z]
inputs={"origins":start,"destinations":end,"mode":my_mode}
result = requests.get(url,params=inputs)
data = result.json()
# print("data is", data)
#
# save this json data to the file named distances.json
#
filename_to_save = "distances.json"
f = open( filename_to_save, "w" ) # opens the file for writing
string_data = json.dumps( data, indent=2 ) # this writes it to a string
f.write(string_data) # then, writes that string to a file
f.close() # and closes the file
print("\nFile", filename_to_save, "written.")
# no need to return anything, since we're better off reading it from file later...
return
#
# example of handling json data via Python's json dictionary
#
def json_process():
""" This function reads the json data from "distances.json"
It should build a formatted table of all pairwise distances.
_You_ decide how to format that table (better than JSON!)
"""
filename_to_read = "distances.json"
f = open( filename_to_read, "r" )
string_data = f.read()
JD = json.loads( string_data ) # JD == "json dictionary"
# print("The unformatted data in", filename_to_read, "is\n\n", JD, "\n")
# print("Accessing some components:\n")
row0 = JD['rows'][0]
# print("row0 is", row0, "\n")
cell0 = row0['elements'][0]
# print("cell0 is", cell0, "\n")
distance_as_string = cell0['distance']['text']
# print("distance_as_string is", distance_as_string, "\n")
# we may want to continue operating on the whole json dictionary
# so, we return it:
return JD
def pTable():
    JD = json_process()
    for s, row in enumerate(JD['rows']):
        oAdd = JD['origin_addresses'][s]
        print("Travel time from " + oAdd + " to:")
        for d, elems in enumerate(row['elements']):
            dAdd = JD['destination_addresses'][d]
            if elems.get('status') != 'OK':   # e.g. ZERO_RESULTS when no route exists
                print("- " + dAdd + ": no route found")
                continue
            distance_as_string = elems['distance']['text']
            duration_as_string = elems['duration']['text']
            print("- " + dAdd + " is", distance_as_string, "in " + duration_as_string)
        print()
    return
#
# We're scripting!
#
if True:
""" here's everything we'd like to run, when the file runs... """
# Sources
Sources = ["Claremont,CA","Seattle,WA","Philadelphia,PA"]
# Destinations
Dests = ["Seattle,WA","Miami+FL","Boston+MA"]
if True: # do we want to run the API call?
google_api(Sources, Dests, "Driving") # get a new JSON file and save it!
# either way, we want to process the JSON file:
json_process()
pTable()
# Sources
Sources = ["Kinshasa,Congo","Libreville,Gabon"]
# Destinations
Dests = ["Cairo,Egypt","Freetown, Ivory Coast"]
if True: # do we want to run the API call?
google_api(Sources, Dests, "Walking") # get a new JSON file and save it!
# either way, we want to process the JSON file:
json_process()
pTable()
# https://maps.googleapis.com/maps/api/distancematrix/json?origins=Vancouver+BC|Seattle&destinations=San+Francisco|Victoria+BC&mode=bicycling&language=fr-FR&key=YOUR_API_KEY | UTF-8 | Python | false | false | 5,330 | py | 38 | hw6pr2.py | 22 | 0.59531 | 0.58743 | 0 | 204 | 25.122549 | 173 |
anhlbt/VietnameseOCR | 10,960,756,551,607 | 2f7d67023971af4d23ba6deb8b6ce4c5167e18b7 | be591c275830d6c4a66e22e7337dfa65bd732406 | /generate_dataset.py | 649b421e47ce5bf614727015e794431215203a51 | [
"Apache-2.0"
]
| permissive | https://github.com/anhlbt/VietnameseOCR | e100162f0489b817c74e7c1be000a22dbbbad421 | ccc2fbda772f72aeba4667769096beb51eeca9f2 | refs/heads/master | 2021-07-15T04:10:00.947369 | 2021-07-01T05:35:02 | 2021-07-01T05:35:02 | 189,510,030 | 0 | 0 | Apache-2.0 | true | 2019-05-31T01:59:46 | 2019-05-31T01:59:46 | 2019-05-28T05:11:22 | 2018-08-30T03:01:40 | 110,223 | 0 | 0 | 0 | null | false | false | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import json
import os
import random
import sys
from config import *
class DataGenerator:
def __init__(self):
self.i = 0
self.log = []
self.errors = []
self.data_folder = DATASET_DIR
self.font_list = FONT_LIST
self.data_set_csv = DATASET_FILE
self.characters = []
self.dataset_size = 0
def rgb2gray(self, rgb):
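        # weighted channel sum using the ITU-R BT.601 luma coefficients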
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def get_list_characters(self):
if len(self.characters) != 0:
return self.characters
else:
characters = []
with open(CHARACTERS_SET) as cf:
for r in cf:
if ',,' in r:
c = ','
else:
_, c = r.split(',')
characters.append(c.replace('\n', ''))
self.characters = characters
return characters
def create_text_image(self, text, font_ttf, idx_category, font_size):
try:
image = Image.new("RGB", (IMG_WIDTH, IMG_HEIGHT), (255, 255, 255))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(font_ttf, font_size)
w, h = draw.textsize(text, font=font)
draw.text(((IMG_WIDTH - w) / 2, (IMG_HEIGHT - h) / 2), text, (0, 0, 0), font=font)
if SAVE_TEXT_IMAGE_TO_DISK:
image.save(self.data_folder + str(idx_category) + '/' + str(self.i) + '.jpg')
self.log.append({'font': font_ttf, 'image': str(self.i) + '.jpg'})
self.i = self.i + 1
return image
except Exception as e:
self.errors.append({'font': font_ttf, 'errors': str(e)})
return None
def generate_data_set(self, text, idx_category):
images = []
with open(self.font_list, 'r') as fonts:
for font in fonts:
if '#' not in font:
for font_size in range(FONT_SIZE_MIN, FONT_SIZE_MAX + 1):
image = self.create_text_image(text, font.replace('\n', ''), idx_category, font_size)
                        if image is not None:
self.dataset_size = self.dataset_size + 1
images.append(image)
self.i = 0
return images
def generate_dataset(self):
characters = self.get_list_characters()
for idx, ch in enumerate(characters):
if SAVE_TEXT_IMAGE_TO_DISK and not os.path.exists(self.data_folder + str(idx)):
os.makedirs(self.data_folder + str(idx))
c_images = self.generate_data_set(ch, idx)
print('.', end='')
for ic in c_images:
image = np.asarray(ic)
image = self.rgb2gray(image)
image = image.reshape(1, IMG_WIDTH * IMG_HEIGHT)
with open(DATASET_FILE, 'ab') as df:
image = np.concatenate((image, np.array([[int(idx)]])), axis=1)
np.savetxt(df, image, delimiter=",", fmt="%d")
if __name__ == "__main__":
print('Generating dataset...')
generator = DataGenerator()
generator.generate_dataset()
print('Text Image Dataset is generated:', DATASET_FILE_PATH)
| UTF-8 | Python | false | false | 3,424 | py | 10 | generate_dataset.py | 6 | 0.517231 | 0.505841 | 0 | 97 | 34.298969 | 109 |
ZIP97/Green | 6,786,048,352,275 | 7827b254a037e1dd627886732320998c10fb4acd | 78a7fbbb28ee8c7e47c2f4c3f815241efe0a44b7 | /temp/网易财经-年报解析.py | 3142483131783115bbf76cb70f8dfefc5b930c92 | []
| no_license | https://github.com/ZIP97/Green | 0287f8551f261f5735ceab2cc44142822b3874d2 | 27fc5277ab59a94ccf4a468331d75aec44cb7056 | refs/heads/master | 2020-04-18T22:24:43.915693 | 2019-02-10T15:26:27 | 2019-02-10T15:26:27 | 167,792,880 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
import numpy as np
import pandas as pd
import requests, os
from bs4 import BeautifulSoup
from multiprocessing import Pool
from Proxy import get_proxy, getWeb, gen_proxy
proxies = get_proxy()
def get_write(i):
'''
    Fetch each annual report and write it to a txt file
    :param i: index of the report in the annual-report list
'''
text = getWeb(df.href.iloc[i], Return="text", proxies=proxies)
text = text.replace(" ", "")
# text = text.replace("\r<br/>", "")
txtFile = r"{}{}_{}.txt".format(txtPath, code, df["公告标题"].iloc[i])
with open(txtFile, "w") as f:
f.write(text)
return text
txtPath = "./txt/"
csvPath = "./年度报告/"
code = "601899"
corp = "紫金矿业"
# Parse the NetEase Finance (网易财经) periodic-report table and collect the annual-report links
url = "http://quotes.money.163.com/f10/gsgg_{},dqbg.html".format(code)
soupTable = getWeb(url, proxies=proxies, Return="soup")
result = soupTable.find_all("div", class_="tabs_panel")
res = result[0].get_text().split("\n")
res = list(filter(None, res))[:-1]
href = ["http://quotes.money.163.com"+item.get("href") for item in result[0].find_all("a")][:-3]
npRes = np.array(res).reshape(-1, 3)
df = pd.DataFrame(npRes[1:, :], columns=npRes[0, :])
df["href"] = href
df["releaseYear"] = df["公布日期"].map(lambda x: x[:4])
index = df["公告类型"]=="年度报告"
df = df[index]
df.to_csv("{}{}_{}.csv".format(csvPath, code, corp), encoding="gbk", index=False)
# Fetch the report pages with a process pool and save them to disk
pool = Pool()
temp = pool.imap_unordered(get_write, range(len(df)))
texts = [item for item in temp]
pool.close()
| UTF-8 | Python | false | false | 1,599 | py | 836 | 网易财经-年报解析.py | 14 | 0.638947 | 0.621622 | 0 | 47 | 29.659574 | 96 |
galegosimpatico/playgrnd | 18,262,200,954,502 | 68d42fc602b8eaddc21a764a4e8dff586a3fe384 | 96e78090a515a531c2020e44b09734e01e8cd29d | /contest/tuenti/2015/2/almost.py | 7c6143fd523f1811df968aa350736d432635f9f2 | []
| no_license | https://github.com/galegosimpatico/playgrnd | 8684e5465b3d6250d567b9b3fd38d01ff0aad20a | e2efe4908fce39a93ed1df7ae3bc1e0dd3a745b6 | refs/heads/master | 2019-04-30T01:32:28.112503 | 2015-05-06T07:03:57 | 2015-05-06T07:03:57 | 12,020,126 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import argparse
import sys
USE_COMPREHENSION = False
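# "Almost primes" here are semiprimes: products of exactly two primes (not
# necessarily distinct), e.g. 4 = 2*2, 6 = 2*3, 9 = 3*3.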
def check_prime(number, primes_set):
number_is_prime = True
for prime_number in primes_set:
if number % prime_number == 0:
number_is_prime = False
break
return number_is_prime
def grow_almost_primes_set(primes_set, almost_primes_set, new_prime, n):
for prime_number in primes_set:
almost_prime_number = new_prime * prime_number
if almost_prime_number <= n:
almost_primes_set.add(almost_prime_number)
def create_almost_primes_set(n):
if not USE_COMPREHENSION:
# Init lists.
primes_set = {2, 3}
almost_primes_set = {4, 6, 9}
# Generate all primes up to (n / 2).
for i in range(5, int(n / 2) + 1):
i_is_prime = check_prime(i, primes_set)
if i_is_prime:
primes_set.add(i)
grow_almost_primes_set(
primes_set, almost_primes_set, i, n)
return sorted(almost_primes_set)
else:
        # primality by trial division up to sqrt(x); only primes up to n/2 can
        # appear as a factor of an almost prime <= n
        primes_set = {x for x in range(2, int(n / 2) + 1)
                      if all(x % y for y in range(2, int(x**.5) + 1))}
        sorted_primes_set = sorted(primes_set)
        almost_primes_set = {x * y
                             for x in sorted_primes_set
                             for y in sorted_primes_set
                             if x * y <= n}
        return sorted(almost_primes_set)
def find_almost_primes_in_interval(almost_primes_set, min_, max_):
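    # almost_primes_set is expected to be sorted ascending (see
    # create_almost_primes_set); that ordering is what makes the early break valid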
almost_primes_in_interval = set()
for almost_prime_number in almost_primes_set:
if min_ <= almost_prime_number <= max_:
almost_primes_in_interval.add(almost_prime_number)
elif almost_prime_number > max_:
break
return almost_primes_in_interval
def run():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file')
parser.add_argument('-o', '--output_file')
arguments = parser.parse_args()
if arguments.input_file:
input_file = open(arguments.input_file)
cases_no = int(input_file.readline())
lines = input_file.readlines()
else:
cases_no = int(sys.stdin.readline())
lines = sys.stdin.readlines()
if arguments.output_file:
output_file = open(arguments.output_file, 'w')
else:
output_file = None
# Find most bigger 'b' present in any test case.
max_b = 6
for line in lines:
candidate_b = int(line.split(' ')[1])
if candidate_b > max_b:
max_b = candidate_b
almost_primes_set = create_almost_primes_set(max_b)
for case in range(cases_no):
a, b = map(int, lines[case].split(' '))
almost_primes_in_interval = find_almost_primes_in_interval(
almost_primes_set, a, b)
        case_result = len(almost_primes_in_interval)
if arguments.output_file:
output_file.write(str(case_result) + '\n')
output_file.flush()
else:
print(case_result)
sys.stdout.flush()
if __name__ == '__main__':
run()
| UTF-8 | Python | false | false | 3,362 | py | 66 | almost.py | 33 | 0.580309 | 0.575848 | 0 | 105 | 31.019048 | 76 |
longshuo1995/craw_conf | 1,511,828,500,742 | 5bc68c3d9d5a87ff2f4df62c13a0db5765891bbe | 0dd0c04e495984b6ae46131fe360c597b1447915 | /crawl_conf/flask_interface/02_interface_test.py | ad392753de8b2f973794522b4ffeba9f7859704b | []
| no_license | https://github.com/longshuo1995/craw_conf | 9a20f7fdec1b821f0f1e7170619e7362a061fa0c | ae98e8fcf209bca8e959054e09621fba79864b02 | refs/heads/master | 2020-03-08T04:22:43.585495 | 2018-04-12T06:59:35 | 2018-04-12T06:59:35 | 127,919,371 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
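# Post a parse seed (one extraction rule per field: name, XPath rule, rule type,
# field type) to the local /parse endpoint and print the decoded JSON response.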
data = {
"url": "http://www.baidu.com",
"seed": {
"article": {
"field_name": "article",
"field_rule": "//a/href",
"rule_type": "xpath",
"field_type": "article"
},
"create_time":
{
"field_name": "create_time",
"field_rule": "//span",
"rule_type": "xpath",
"field_type": "create_time"
}
}
}
data = {'data': json.dumps(data)}
res = requests.post("http://127.0.0.1:10086/parse", data=data).text
print(json.loads(res))
| UTF-8 | Python | false | false | 615 | py | 29 | 02_interface_test.py | 25 | 0.447154 | 0.429268 | 0 | 24 | 24.625 | 67 |
rogeriosilva-ifpi/adsi-algoritmos-2016.1 | 17,875,653,901,033 | 43ca87ce2dae9ce6449afed61985483aa86cf8ce | 9861218f60ab23d6ac3bc6b400c220abf4e64fb5 | /atividade_d/gerson/GERSON CAVALCANTE PROVA/q08.py | e760cb6f525802539359c55f4f03a77ebdb6e126 | []
| no_license | https://github.com/rogeriosilva-ifpi/adsi-algoritmos-2016.1 | a0b0709eb783110a9b335c8364aa41ce4f90fb24 | 1714e2480b80e46be4d96049e878bf17b692320b | refs/heads/master | 2021-06-06T18:25:00.836715 | 2016-09-07T02:02:30 | 2016-09-07T02:02:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | a = input()
b = input()
c = input()
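# If a, b and c satisfy the triangle inequality, print the perimeter;
# otherwise print the area of the trapezoid with bases a, b and height c.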
if (b - c) < a < (b + c) and (a - c) < b < (a + c) and (a - b) < c < (a + b):
    perimetro = a + b + c
    print "Perimetro = %.1f" % perimetro
else:
    area = ((a + b) * c) / 2.0
    print "Area = %.1f" % area
dfrojas/insumos | 584,115,583,415 | 5ab13b14dd9e3b2d63bda3316a794cb4fe8037ba | 80b73d863f25cdfbeb75ef0aff7ffe84266d868c | /agroecoturismo/models.py | 3f209f988790222481aac683f0d1069102d3d3e7 | []
| no_license | https://github.com/dfrojas/insumos | 18354f37fce32230aaff7ff346e0b0068a9ebb21 | 1ecafe8393a40ff4dab1689235515f49a55d5e62 | refs/heads/master | 2016-09-09T20:48:04.899543 | 2015-04-28T21:01:27 | 2015-04-28T21:01:27 | 34,008,214 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from predio.models import InfoPredioGeneral
from opciones.models import AtractivoEcoturismo, ActividadEcoturismo
class Agroecoturismo(models.Model):
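    """Agro-ecotourism survey of a property (predio): its attractions, activities and notes."""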
predio = models.ForeignKey(InfoPredioGeneral)
atractivo = models.ManyToManyField(AtractivoEcoturismo)
actividad = models.ManyToManyField(ActividadEcoturismo)
observaciones = models.TextField()
def __str__(self):
return self.predio.nombre_predio
| UTF-8 | Python | false | false | 433 | py | 74 | models.py | 67 | 0.82679 | 0.82679 | 0 | 13 | 32.307692 | 68 |
archerckk/PyTest | 19,404,662,244,320 | 7b20318b7310321c53c80f2f6ea7a4db2fe42c9f | 595c69f717fc3ceb4e0701cc433f6d7f927b6fdb | /Ar_Script/ar_045_字典_通讯录程序.py | 323628e994e1a70f7d4c4d958cdf9b048add94cf | [
"MIT"
]
| permissive | https://github.com/archerckk/PyTest | d6462ebf46c6dbd5bb3ce03666aad0c2665367cd | 610dd89df8d70c096f4670ca11ed2f0ca3196ca5 | refs/heads/master | 2022-03-26T21:09:25.891745 | 2021-06-14T01:39:36 | 2021-06-14T01:39:36 | 129,497,345 | 0 | 0 | null | false | 2020-01-14T10:57:49 | 2018-04-14T08:23:03 | 2020-01-13T14:29:04 | 2020-01-14T10:57:49 | 28,113 | 0 | 0 | 0 | Python | false | false | print()
'Exercise: use the features of dicts to write an address-book program'
'''
1. Look up a contact
    print the record if it exists, otherwise warn
2. Insert a new contact
    if the name is absent, add it directly
    if it already exists, ask whether to modify
        yes: overwrite the stored number and report success
        no: leave the insert operation
3. Delete a contact
    if the contact does not exist
        report that the contact to delete does not exist
    if the contact exists
        ask whether to delete
            yes: delete it and print the removed name
            no: leave the current option
4. Quit the address-book program
    print a goodbye message and end the loop
'''
print(
    '''
    |--- Welcome to the address book   ---|
    |--- 1. Look up a contact          ---|
    |--- 2. Insert a new contact       ---|
    |--- 3. Delete an existing contact ---|
    |--- 4. Quit the program           ---|'''
)
def contacts():
codeList=['1','2','3','4']
contact={}
while 1:
        code = input('\nPlease enter a command code: ')
        if code not in codeList:
            print('Invalid command code! ', end='')
        if code == '1':
            name = input('Please enter the contact name: ')
            # if name not in contact:
            #     print('The contact you looked up does not exist!!')
            # else:
            #     print(name + ':', contact[name])
            'Exception handling keeps the lookup to a single dict access'
            try:
                print(name + ':', contact[name])
            except KeyError:
                print('The contact you looked up does not exist!!')
if code=='2':
            name = input('Please enter the contact name: ')
            if name not in contact:
                contact[name] = input('Please enter the phone number: ')
            else:
                print('That name already exists in the address book -->> %s: %s' % (name, contact[name]))
                'With two or more format arguments, remember the parentheses, or the format-argument count will be wrong'
                while 1:
                    decide = input('Modify this contact? (YES/NO): ')
                    if decide in ['YES', 'yes', 'Yes']:
                        contact[name] = input('Please enter the phone number: ')
                        print('Contact updated successfully')
                        break
                    if decide in ['NO', 'No', 'no']:
                        break
                    else:
                        print('Invalid input! ', end='')
if code=='3':
            name = input('Please enter the contact name: ')
            if name in contact:
                while 1:
                    decide = input('Delete this contact? (YES/NO): ')
                    if decide in ['YES', 'yes', 'Yes']:
                        del contact[name]
                        print('Deleted contact %s' % name)
                        break
                    if decide in ['NO', 'No', 'no']:
                        break
                    else:
                        print('Invalid input! ', end='')
            else:
                print('The contact you want to delete does not exist!')
if code=='4':
            print('|--- Thanks for using the address book ---|')
break
contacts() | UTF-8 | Python | false | false | 3,067 | py | 575 | ar_045_字典_通讯录程序.py | 530 | 0.397633 | 0.389303 | 0 | 91 | 24.076923 | 69 |
aferriss/dcganMod | 17,360,257,844,229 | 366b67baa57efdba6ab9f8f5c33a4db68d243c1b | 8a1ea67914acd3e6aa32005cd955da21d9f32f41 | /test.py | 15d6f9eb82be418da1b5d9d479b97fca5f25c226 | [
"BSD-3-Clause"
]
| permissive | https://github.com/aferriss/dcganMod | 442e8bb76a05307f9c539326bbb03b4ebacdb408 | 68e0323b908c8aa7088350bd16c562a87b09e75d | refs/heads/master | 2021-01-10T14:56:09.691317 | 2017-05-16T01:26:42 | 2017-05-16T01:26:42 | 53,920,361 | 1 | 1 | null | false | 2017-05-20T17:23:28 | 2016-03-15T06:26:42 | 2016-12-21T20:51:53 | 2017-05-16T01:26:57 | 3,036 | 0 | 1 | 1 | Lua | null | null | import math
import numpy as np
import theano
ny = 11
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def OneHot(X, n=None, negative_class=0.):
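    # Flatten X into a vector of integer labels, build a (len(X), n) matrix
    # filled with negative_class, then set each row's label column to 1.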
print "neg class"
print n
X = np.asarray(X).flatten()
print (X)
if n is None:
n = np.max(X) + 1
Xoh = np.ones((len(X), n)) * negative_class
Xoh[np.arange(len(X)), X] = 1.
return Xoh
def get_buffer_y(steps,num_buffer_samples=10,num_buffer_steps=2):
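    # Build integer class targets for a buffer of samples: row r covers classes
    # starting at r*steps, advancing one class every num_buffer_steps columns,
    # then one-hot encode them against ny classes.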
num_buffer_rows = int(math.ceil(float(num_buffer_samples) / steps))
print("num buf rows= " + str(num_buffer_rows))
    end = np.empty([num_buffer_rows, steps], dtype='int')
    for row in range(num_buffer_rows):
        for i in range(steps):
            end[row][i] = int(round(row * steps + i / num_buffer_steps))
ymb = floatX(OneHot(end.flatten(), ny))
return ymb
y = get_buffer_y(5)
print y
| UTF-8 | Python | false | false | 967 | py | 3 | test.py | 1 | 0.654602 | 0.645295 | 0 | 42 | 22.02381 | 119 |