# File: python/lib.py (repo: m1sterzer0/JuliaAtcoder)
import math
import collections
## Recall heapq has heappush,heappop,heapify for simple minheaps -- faster than this implementation
class maxHeap :
v = []
def __init__(self) : self.v = [0]
def len(self) : return len(self.v)-1
def isempty(self) : return len(self.v) == 1
def top(self) : return self.v[1]
def push(self,val) :
self.v.append(val)
self._bubbleup(len(self.v)-1)
def pop(self) :
ans = self.v[1]
xx = self.v.pop()
if len(self.v) > 1 :
self.v[1] = xx
self._bubbledown(1)
return ans
def _bubbleup(self,idx) :
if idx == 1 : return
j = idx >> 1
if self.v[j] < self.v[idx] :
self.v[j],self.v[idx] = self.v[idx],self.v[j]
self._bubbleup(j)
def _bubbledown(self,idx) :
l = idx << 1; r = l + 1
ll = len(self.v)
res1 = l >= ll or self.v[idx] >= self.v[l]
res2 = r >= ll or self.v[idx] >= self.v[r]
if res1 and res2 : return
if res1 : self.v[idx],self.v[r] = self.v[r],self.v[idx]; self._bubbledown(r); return
if res2 : self.v[idx],self.v[l] = self.v[l],self.v[idx]; self._bubbledown(l); return
if self.v[l] >= self.v[r] : self.v[idx],self.v[l] = self.v[l],self.v[idx]; self._bubbledown(l); return
self.v[idx],self.v[r] = self.v[r],self.v[idx]; self._bubbledown(r)
class minHeap :
v = []
def __init__(self) : self.v = [0]
def len(self) : return len(self.v)-1
def isempty(self) : return len(self.v) == 1
def top(self) : return self.v[1]
def push(self,val) :
self.v.append(val)
self._bubbleup(len(self.v)-1)
def pop(self) :
ans = self.v[1]
xx = self.v.pop()
if len(self.v) > 1 :
self.v[1] = xx
self._bubbledown(1)
return ans
def _bubbleup(self,idx) :
if idx == 1 : return
j = idx >> 1
if self.v[j] > self.v[idx] :
self.v[j],self.v[idx] = self.v[idx],self.v[j]
self._bubbleup(j)
def _bubbledown(self,idx) :
l = idx << 1; r = l + 1
ll = len(self.v)
res1 = l >= ll or self.v[idx] <= self.v[l]
res2 = r >= ll or self.v[idx] <= self.v[r]
if res1 and res2 : return
if res1 : self.v[idx],self.v[r] = self.v[r],self.v[idx]; self._bubbledown(r); return
if res2 : self.v[idx],self.v[l] = self.v[l],self.v[idx]; self._bubbledown(l); return
if self.v[l] <= self.v[r] : self.v[idx],self.v[l] = self.v[l],self.v[idx]; self._bubbledown(l); return
self.v[idx],self.v[r] = self.v[r],self.v[idx]; self._bubbledown(r)
class minHeapEnh :
vt = []; pos = {}
    def __init__(self) : self.vt = []; self.pos = {}  ## per-instance storage; class-level lists would be shared between instances
def _swap(mh,i,j) :
(n1,n2) = (mh.vt[i][1],mh.vt[j][1])
mh.pos[n2],mh.pos[n1] = i,j
mh.vt[i],mh.vt[j] = mh.vt[j],mh.vt[i]
def _bubbleup(mh,i) :
if i == 0 : return
j = (i-1) >> 1
if mh.vt[i] < mh.vt[j] : mh._swap(i,j); mh._bubbleup(j)
def _bubbledown(mh,i) :
ll = len(mh.vt)
l = (i<<1) + 1; r = l+1
res1 = l >= ll or not (mh.vt[i] > mh.vt[l])
res2 = r >= ll or not (mh.vt[i] > mh.vt[r])
if res1 and res2 : return
if res2 or not res1 and not mh.vt[l] > mh.vt[r] :
mh._swap(i,l); mh._bubbledown(l)
else :
mh._swap(i,r); mh._bubbledown(r)
def push(mh,d,n) :
if n in mh.pos :
idx = mh.pos[n]
n2 = mh.vt[idx]
if d < n2[0] : mh.vt[idx] = (d,n); mh._bubbleup(idx)
else :
mh.vt.append((d,n))
idx = len(mh.vt)-1
mh.pos[n] = idx
mh._bubbleup(idx)
def pop(mh) :
ans = mh.vt[0]; del mh.pos[ans[1]]
n2 = mh.vt.pop()
if len(mh.vt) >= 1 :
mh.pos[n2[1]] = 0
mh.vt[0] = n2
mh._bubbledown(0)
return ans
def isempty(mh) :
return len(mh.vt) == 0
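## Usage sketch (illustration added here, not part of the original library):
## push/pop/top on the binary heaps, plus minHeapEnh's decrease-key,
## which re-pushes a smaller key for an existing node id.
def _demo_heaps() :
    mx = maxHeap()
    for x in [3,1,4,1,5] : mx.push(x)
    assert mx.pop() == 5 and mx.top() == 4
    mn = minHeap()
    for x in [3,1,4,1,5] : mn.push(x)
    assert mn.pop() == 1 and mn.top() == 1
    dj = minHeapEnh()
    dj.push(10,'a'); dj.push(7,'b')
    dj.push(3,'b')  ## decrease-key: node 'b' goes from 7 to 3
    assert dj.pop() == (3,'b')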
def modinvp(a,p) : return pow(a,p-2,p)  ## Fermat inverse; requires prime modulus p
def modinv(a,p) : return pow(a,p-2,p)   ## alias of modinvp; requires prime modulus p
def modpow(a,p,m) : return pow(a,m,p)   ## a**m mod p -- note the (base, modulus, exponent) argument order
def egcd(a,b) :
if a == 0 : return (b,0,1)
g,y,x = egcd(b % a, a)
return (g,x-(b//a)*y,y)
def modinv2(a,m) :
g,x,y = egcd(a,m)
if g != 1 : raise Exception('modular inverse does not exist')
return x % m
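## Worked example (illustration, not in the original): both inverses satisfy
## a * a^-1 == 1 (mod m); modpow takes (base, modulus, exponent).
def _demo_modmath() :
    p = 10**9+7
    assert modinv(3,p) * 3 % p == 1   ## Fermat inverse needs a prime modulus
    assert modinv2(3,10) == 7         ## egcd inverse works for any coprime modulus
    assert modpow(2,1000,10) == 24    ## 2**10 mod 1000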
class fenwicktree :
def __init__(self,n=1) :
self.n = n
self.tot = 0
self.bit = [0] * (n+1)
def clear(self) :
        for i in range(self.n+1) : self.bit[i] = 0  ## bit has n+1 slots (1-indexed)
self.tot = 0
def inc(self,idx,val=1) :
while idx <= self.n :
self.bit[idx] += val
idx += idx & (-idx)
self.tot += val
def dec(self,idx,val=1) : self.inc(idx,-val)
def incdec(self,left,right,val) :
self.inc(left,val); self.dec(right,val)
def prefixsum(self,idx) :
if idx < 1 : return 0
ans = 0
while idx > 0 :
ans += self.bit[idx]
idx -= idx&(-idx)
return ans
def suffixsum(self,idx) : return self.tot - self.prefixsum(idx-1)
def rangesum(self,left,right) : return self.prefixsum(right) - self.prefixsum(left-1)
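## Usage sketch (illustration, not in the original): the tree is 1-indexed;
## inc/dec are point updates, prefixsum/rangesum/suffixsum are inclusive queries.
def _demo_fenwick() :
    ft = fenwicktree(10)
    ft.inc(3,5); ft.inc(7,2)
    assert ft.prefixsum(6) == 5
    assert ft.rangesum(3,7) == 7
    assert ft.suffixsum(7) == 2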
class dsu :
def __init__(self,n=1) :
self.n = n
self.parentOrSize = [-1 for i in range(n)]
def merge(self,a,b) :
x = self.leader(a); y = self.leader(b)
if x == y : return x
if self.parentOrSize[y] < self.parentOrSize[x] : (x,y) = (y,x)
self.parentOrSize[x] += self.parentOrSize[y]
self.parentOrSize[y] = x
return x
def same(self,a,b) :
return self.leader(a) == self.leader(b)
def leader(self,a) :
if self.parentOrSize[a] < 0 : return a
ans = self.leader(self.parentOrSize[a])
self.parentOrSize[a] = ans
return ans
def groups(self) :
leaderBuf = [0 for i in range(self.n)]
groupSize = [0 for i in range(self.n)]
for i in range(self.n) :
leaderBuf[i] = self.leader(i)
groupSize[leaderBuf[i]] += 1
preres = [ [] for i in range(self.n) ]
for (i,v) in enumerate(leaderBuf) :
preres[v].append(i)
return [x for x in preres if x]
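## Usage sketch (illustration, not in the original): union-find over 0..n-1
## with path compression and union by size.
def _demo_dsu() :
    d = dsu(5)
    d.merge(0,1); d.merge(3,4)
    assert d.same(0,1) and not d.same(1,3)
    assert d.groups() == [[0,1],[2],[3,4]]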
class dsu2 :
def __init__(self) :
self.n = 0
self.parentOrSize = {}
def add(self,x) :
if x not in self.parentOrSize :
self.n += 1
self.parentOrSize[x] = -1
def merge(self,a,b) :
x = self.leader(a); y = self.leader(b)
if x == y : return x
if self.parentOrSize[y] < self.parentOrSize[x] : (x,y) = (y,x)
self.parentOrSize[x] += self.parentOrSize[y]
self.parentOrSize[y] = x
return x
def same(self,a,b) :
return self.leader(a) == self.leader(b)
def leader(self,a) :
if self.parentOrSize[a] < 0 : return a
ans = self.leader(self.parentOrSize[a])
self.parentOrSize[a] = ans
return ans
def getGroups(self) :
res = {}
for x in self.parentOrSize :
l = self.leader(x)
if l not in res : res[l] = []
res[l].append(x)
return res
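## Usage sketch (illustration, not in the original): dsu2 is the same structure
## keyed by arbitrary hashable values, which must be add()-ed first.
def _demo_dsu2() :
    d = dsu2()
    for v in ('a','b','c') : d.add(v)
    d.merge('a','c')
    assert d.same('a','c') and not d.same('a','b')
    assert sorted(sorted(g) for g in d.getGroups().values()) == [['a','c'],['b']]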
def isqrt(x) :
if x == 0 : return 0
s = int(math.sqrt(x))
s = (s + x//s) >> 1
return s-1 if s*s > x else s
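## Quick check (illustration, not in the original): isqrt floors the square root.
def _demo_isqrt() :
    assert isqrt(15) == 3 and isqrt(16) == 4 and isqrt(17) == 4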
class factorSieve :
n=1; fs=[]
def __init__(self,n=1) :
self.n = n; self.fs = [-1 for i in range(n+1)]
def sieve(self) :
for i in range(4,self.n+1,2) : self.fs[i] = 2
for i in range(3,isqrt(self.n)+1,2) :
if self.fs[i] > 0 : continue
for j in range(i*i,self.n+1,2*i) :
if self.fs[j] < 0 : self.fs[j] = i
def uniquepf(self,nn) :
if nn <= 1 : return []
ans = []
while True :
s = self.fs[nn]
if s == -1 :
if not ans or ans[-1] < nn : ans.append(nn)
return ans
if not ans or ans[-1] < s : ans.append(s)
nn //= s
def pf(self,nn) :
if nn <= 1 : return []
ans = []
while True :
s = self.fs[nn]
if s == -1 : ans.append(nn); return ans
ans.append(s); nn //= s
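## Usage sketch (illustration, not in the original): after sieve(), fs[j] holds the
## smallest prime factor of each composite j, so factorization is repeated division.
def _demo_factorsieve() :
    fs = factorSieve(100)
    fs.sieve()
    assert fs.pf(84) == [2,2,3,7]       ## full prime factorization
    assert fs.uniquepf(84) == [2,3,7]   ## distinct prime factors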
class segtree :
def __init__(self,n=1,op=sum,e=0,v=None) :
if v is not None : n = len(v)
self.n = n; self.sz = 1; self.log = 0; self.op=op; self.e=e
while self.sz < n : self.sz *= 2; self.log += 1
self.d = [self.e for i in range(2*self.sz)]
if v is not None :
for i in range(n) : self.d[self.sz+i] = v[i]
            for i in range(self.sz-1,0,-1) : self._update(i)  ## build all internal nodes, as in lazysegtree below
def _update(self,k) :
self.d[k] = self.op(self.d[2*k],self.d[2*k+1])
def set(self,p,x) :
p += self.sz
self.d[p] = x
for i in range(1,self.log+1) : self._update(p>>i)
    def get(self,p) : return self.d[self.sz+p]
def prod(self,l,r) :
r += 1 ## want to get product from l to r inclusive
sml = self.e; smr = self.e; l += self.sz; r += self.sz
while (l < r) :
if (l & 1) : sml = self.op(sml, self.d[l]); l += 1
if (r & 1) : r -= 1; smr = self.op(self.d[r],smr)
l >>= 1; r >>= 1
return self.op(sml,smr)
def allprod(self) : return self.d[1]
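## Usage sketch (illustration, not in the original): a max segment tree;
## note that prod(l,r) treats both bounds as inclusive.
def _demo_segtree() :
    st = segtree(op=lambda a,b : max(a,b), e=0, v=[3,1,4,1,5])
    assert st.prod(0,4) == 5
    assert st.prod(1,3) == 4
    st.set(2,9)
    assert st.prod(0,4) == 9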
class lazysegtree :
def __init__(self,n=1,op=sum,e=0,mapping=sum,composition=sum,id=0,v=None) :
if v is not None : n = len(v)
self.n = n; self.sz = 1; self.op=op; self.e=e
self.mapping = mapping; self.composition = composition; self.id = id
self.log = 0
while self.sz < n : self.sz *= 2; self.log += 1
self.d = [self.e for i in range(2*self.sz)]
self.lz = [self.id for i in range(self.sz)]
if v is not None :
for i in range(n) : self.d[self.sz+i] = v[i]
for i in range(self.sz-1,0,-1) : self._update(i)
def _update(self,k) :
#print(f"DBUG update k:{k} d[2k]:{self.d[2*k]} d[2k+1]:{self.d[2*k+1]} d:{self.d}")
self.d[k] = self.op(self.d[2*k],self.d[2*k+1])
def _allApply(self,k,f) :
self.d[k] = self.mapping(f,self.d[k])
if (k < self.sz) : self.lz[k] = self.composition(f, self.lz[k])
def _push(self,k) :
if self.lz[k] != self.id :
self._allApply(2*k,self.lz[k])
self._allApply(2*k+1,self.lz[k])
self.lz[k] = self.id
def set(self,p,x) :
p += self.sz
for i in range(self.log,0,-1) : self._push(p>>i)
self.d[p] = x
for i in range(1,self.log+1) : self._update(p>>i)
def get(self,p) :
p += self.sz
for i in range(self.log,0,-1) : self._push(p>>i)
return self.d[p]
def prod(self,l,r) :
if r < l : return self.e
l += self.sz; r += self.sz; r += 1 ## want to get product from l to r inclusive
for i in range(self.log,0,-1) :
if ((l >> i) << i) != l : self._push(l >> i)
if ((r >> i) << i) != r : self._push((r-1) >> i)
sml = self.e; smr = self.e
while (l < r) :
if (l & 1) : sml = self.op(sml, self.d[l]); l += 1
if (r & 1) : r -= 1; smr = self.op(self.d[r],smr)
l >>= 1; r >>= 1
return self.op(sml,smr)
def allprod(self) : return self.d[1]
def apply(self,p,f) :
p += self.sz
for i in range(self.log,0,-1) : self._push(p>>i)
self.d[p] = self.mapping(f,self.d[p])
for i in range(1,self.log+1) : self._update(p>>i)
def applyRange(self,l,r,f) :
if r < l : return
l += self.sz; r += self.sz; r += 1 ## want to get product from l to r inclusive
for i in range(self.log,0,-1) :
if ((l >> i) << i) != l : self._push(l >> i)
if ((r >> i) << i) != r : self._push((r-1) >> i)
l2=l; r2=r ## Save away original l,r
while (l < r) :
if (l & 1) : self._allApply(l,f); l += 1
if (r & 1) : r -= 1; self._allApply(r,f)
l >>= 1; r >>= 1
l=l2; r=r2 ## Restore original l,r
for i in range(1,self.log+1) :
if ((l >> i) << i) != l : self._update(l >> i)
if ((r >> i) << i) != r : self._update((r-1) >> i)
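## Usage sketch (illustration, not in the original): range-add / range-min,
## i.e. op=min, mapping adds the lazy value, composition adds two lazy values.
def _demo_lazysegtree() :
    st = lazysegtree(op=lambda a,b : min(a,b), e=float('inf'),
                     mapping=lambda f,x : f+x, composition=lambda f,g : f+g,
                     id=0, v=[5,3,7,1])
    st.applyRange(0,2,10)        ## add 10 to elements 0..2 (inclusive)
    assert st.prod(0,2) == 13
    assert st.prod(0,3) == 1     ## element 3 was untouched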
################################################################################
## Maxflow (Dinic from Atcoder Lib ported to python)
################################################################################
class mfEdge :
    def __init__(self,frm=0,to=0,cap=0,flow=0) :  ## 'from' is a Python keyword, so 'frm' is used
        self.frm = frm
        self.to = to
        self.cap = cap
        self.flow = flow
class _mfEdge :
def __init__(self,to=0,rev=0,cap=0) :
self.to = to
self.rev = rev
self.cap = cap
class mfGraph :
def __init__(self,n=0) :
self._n = n
self.pos = []
self.g = [[] for i in range(n)]
    def addEdge(self,frm,to,cap,revcap=0) :
        m = len(self.pos)
        fromid = len(self.g[frm])
        toid = len(self.g[to])
        if frm == to : toid += 1
        self.pos.append((frm,fromid))
        self.g[frm].append(_mfEdge(to,toid,cap))
        self.g[to].append(_mfEdge(frm,fromid,revcap))
return m
def getEdge(self,i) :
pt = self.pos[i]
_e = self.g[pt[0]][pt[1]]
_re = self.g[_e.to][_e.rev]
return mfEdge(pt[0],_e.to,_e.cap+_re.cap,_re.cap)
def edges(self) :
m = len(self.pos)
result = []
for i in range(m) :
result.append(self.getEdge(i))
return result
def changeEdge(self,i,newcap,newflow) :
pt = self.pos[i]
_e = self.g[pt[0]][pt[1]]
_re = self.g[_e.to][_e.rev]
_e.cap = newcap - newflow
_re.cap = newflow
def flow(self,s,t) :
return self.flow2(s,t,10**18)
def flow2(self,s,t,flowlim) :
level = [0] * self._n
iter = [0] * self._n
que = collections.deque()
def bfs() :
for i in range(self._n) : level[i] = -1
level[s] = 0
que.clear()
que.append(s)
while que :
v = que.popleft()
for e in self.g[v] :
if e.cap == 0 or level[e.to] >= 0 : continue
level[e.to] = level[v] + 1
if e.to == t : return
que.append(e.to)
def dfs(v,up) :
if v == s : return up
g = self.g
res = 0
levelv = level[v]
for i in range(iter[v],len(g[v])) :
e = g[v][i]
if levelv <= level[e.to] : continue
cap = g[e.to][e.rev].cap
if cap == 0 : continue
d = dfs(e.to,min(up-res,cap))
if d <= 0 : continue
g[v][i].cap += d
g[e.to][e.rev].cap -= d
res += d
if res == up : return res
level[v] = self._n
return res
## Now for the main part of the dinic search
flow = 0
while (flow < flowlim) :
bfs()
if level[t] == -1 : break
for i in range(self._n) : iter[i] = 0
f = dfs(t,flowlim-flow)
if f == 0 : break
flow += f
return flow
def mincut(self,s) :
visited = [0] * self._n
que = collections.deque()
        que.append(s)  ## deque has no push(); append is the correct call
while que :
p = que.popleft()
visited[p] = True
for e in self.g[p] :
if e.cap > 0 and not visited[e.to] :
visited[e.to] = True
que.append(e.to)
return visited
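## Usage sketch (illustration, not in the original): a 4-node Dinic example.
def _demo_maxflow() :
    g = mfGraph(4)
    g.addEdge(0,1,2); g.addEdge(0,2,1)
    g.addEdge(1,3,1); g.addEdge(2,3,2)
    assert g.flow(0,3) == 2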
# File: nnProject/DIY_script/web/flask.py (repo: BaotingKing/MyHouse)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: ZK
# Time: 2019/01/01
import sys
from flask import request, jsonify, Flask
from flask_apscheduler import APScheduler
class config(object):
JOBS = [
{
'id': 'job1',
            'func': '__main__:get_one',  # module:callable reference must not contain a space
'trigger': 'interval',
'seconds': 10,
}
]
def get_one():
pass
app = Flask(__name__)
app.config.from_object(config())
@app.route('/admin/index', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
return 'Hello word!'
elif request.method == 'GET':
return 'Hello my homeland!'
if __name__ == '__main__':
    stdout_backup = sys.stdout  # keep a copy of the original stdout stream
    print('This is a minimal Flask web app, provided only as an example')
    scheduler = APScheduler()  # wire up the scheduled job declared in config.JOBS
    scheduler.init_app(app)
    scheduler.start()
    app.run()
# File: scripts/01_MEGRE/11_fix_nondecay.py (repo: ofgulban/meso-MRI)
"""Detect voxels that do not decay over time."""
import os
import os
import nibabel as nb
import numpy as np
# Parameters
NII_NAME = "/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/10_composite/sub-23_ses-T2s_part-mag_MEGRE_crop_ups2X_prepped_avg_composite.nii.gz"
OUTDIR = "/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/11_decayfix"
# =============================================================================
print("Step_11: Detect and fix non-decaying timepoints.")
# Output directory
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
print(" Output directory: {}".format(OUTDIR))
# =============================================================================
nii = nb.load(NII_NAME)
dims = nii.shape
data = nii.get_fdata()
data = np.abs(data)
idx = data != 0
data[idx] = np.log(data[idx])
# 1-neighbour fix
temp1 = np.zeros(dims[:-1])
for i in range(dims[3] - 1):
temp2 = data[..., i] - data[..., i+1]
idx = temp2 < 0
if (i > 0) and (i < dims[3] - 1):
data[idx, i] = (data[idx, i-1] + data[idx, i+1]) / 2
else:
temp1[idx] = 1
# Save
basename, ext = NII_NAME.split(os.extsep, 1)
basename = os.path.basename(basename)
img = nb.Nifti1Image(temp1, affine=nii.affine)
nb.save(img, os.path.join(OUTDIR, "{}_decaymask.nii.gz".format(basename)))
data = np.exp(data)
img = nb.Nifti1Image(data, affine=nii.affine)
nb.save(img, os.path.join(OUTDIR, "{}_decayfixed.nii.gz".format(basename)))
print('Finished.')
| UTF-8 | Python | false | false | 1,457 | py | 68 | 11_fix_nondecay.py | 64 | 0.593686 | 0.567605 | 0 | 49 | 28.734694 | 146 |
MyRobotLab/myrobotlab | 7,791,070,688,707 | 3b09dc4acadf8c7c8c5f7d8a9fb9f4a4a315cb96 | 09a7fa80d420634848b5e6af7b59353afd8c726b | /src/main/resources/resource/HttpClient/HttpClient.py | f9072207b0f503018f3ef4627cd3bed3140aac48 | [
"Apache-2.0",
"CC-BY-2.5"
] | permissive | https://github.com/MyRobotLab/myrobotlab | cf789956d9f97a98eead44faf7a8b61f70348dc3 | 0ecdc681b4928ab65649404779c095d352dd96b1 | refs/heads/develop | 2023-09-04T10:57:19.041683 | 2023-08-30T14:04:44 | 2023-08-30T14:04:44 | 18,051,302 | 213 | 114 | Apache-2.0 | false | 2023-09-07T14:14:58 | 2014-03-24T03:59:27 | 2023-08-30T19:00:09 | 2023-09-07T14:14:58 | 140,585 | 209 | 94 | 119 | Java | false | false | ################################################
# HttpClient service is a service wrapper of the Apache HttpClient
# So, you can download webpages, images, and a all sorts of
# goodies from the internet
http = runtime.start("http","HttpClient")
# blocking methods
# GETs
print(http.get("https://www.google.com"))
print(http.get("https://www.cs.tut.fi/~jkorpela/forms/testing.html"))
# POSTs
http.addFormField("Comments", "This is a different comment")
http.addFormField("Box", "yes")
http.addFormField("Unexpected", "this is an unexpected field")
http.addFormField("hidden field", "something else")
print(http.post("http://www.cs.tut.fi/cgi-bin/run/~jkorpela/echo.cgi"))
http.clearForm()
http.addFormField("NewField", "Value")
http.addFormField("name", "value")
# call-back methods
# step one add a listener
# you could also 'subscribe' to the appropriate methods
# e.g. python.subscribe('http','publishHttpData') &
# python.subscribe('http','publishHttpResponse') - the addListeners
# do the same thing
http.addHttpDataListener(python)
http.addHttpResponseListener(python)
# define the callback endpoints
def onHttpData(httpData):
print(httpData.uri)
print(httpData.contentType)
print(httpData.data)
print(httpData.responseCode)
def onHttpResponse(response):
print(response)
# make the request and the callbacks will be called when
# the method completes
http.post("http://www.cs.tut.fi/cgi-bin/run/~jkorpela/echo.cgi")
| UTF-8 | Python | false | false | 1,442 | py | 1,622 | HttpClient.py | 1,376 | 0.728155 | 0.728155 | 0 | 47 | 29.680851 | 71 |
fullonic/bookmarks | 11,132,555,275,152 | e44d02449ba909ae989d72b3d1fde2cdc66c27cd | bc00e301e2fd28014b186d5018205f9d6b849560 | /markers/signals.py | 374fd9bc923ecf4c328ac47000c042b7d66e6709 | [] | no_license | https://github.com/fullonic/bookmarks | d2a341249a86e37925b4f88b195d185e1851a7e0 | b398c15dfd8244bde78ac5efe203862ef6b5caf0 | refs/heads/master | 2023-03-21T05:37:29.016228 | 2021-03-14T21:42:30 | 2021-03-14T21:42:30 | 347,378,035 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .core import generate_tags
from django.db.utils import IntegrityError
# TODO: remove signals: move this to a plain function called when a new bookmark is saved
def add_tags_to_bookmark(sender, instance, **kwargs):
from .models import Tag
tag_list = Tag.objects.values_list("name", flat=True)
tags = generate_tags(instance.url, instance.title, tag_list)
for t in tags:
tag = Tag.objects.get(name=t)
instance.tags.add(tag)
| UTF-8 | Python | false | false | 465 | py | 28 | signals.py | 20 | 0.709677 | 0.709677 | 0 | 13 | 34.769231 | 93 |
SidAhmed01/E-commerce-Django-Project-Aroma-thems | 16,604,343,577,328 | 5897dc7ac01a52bd2288033cbc9ae2617595e5c9 | dab28e03c52e03966b8dd536cfd266167e1b2521 | /pages/views.py | 5e7604dc80ca31a1722b5d6d58aa251ff9f64b98 | [] | no_license | https://github.com/SidAhmed01/E-commerce-Django-Project-Aroma-thems | 666d7e29232876df71ae062a490048393b7a76d0 | 459f0ecb32729d5af889f94cf51a6797c52b8657 | refs/heads/main | 2023-06-27T21:07:52.475198 | 2021-07-31T11:09:46 | 2021-07-31T11:09:46 | 391,318,440 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, get_object_or_404
from .models import *
from django.core.paginator import Paginator
# Create your views here.
#global_variable
def index(request):
context = {
'product_list': Product.objects.all()
}
return render(request, 'pages/index.html', context)
def contact(request):
return render(request, 'pages/contact.html')
def shopcategory(request):
product_list = Product.objects.all()
    paginator = Paginator(product_list, 4)  # show 4 products per page
page = request.GET.get('page')
product_list = paginator.get_page(page)
context = {
'product_list': product_list
}
return render(request, 'products/shopcategory.html', context)
def productdetails(request, slug):
context = {
'product_details' : Product.objects.get( slug = slug),
}
return render(request, 'products/productdetails.html', context)
def confirmation(request):
return render(request, 'products/confirmation.html')
def shopingcart(request):
return render(request, 'products/shopingcart.html')
def productcheckout(request):
return render(request, 'products/productcheckout.html')
| UTF-8 | Python | false | false | 1,242 | py | 24 | views.py | 14 | 0.669082 | 0.664251 | 0 | 62 | 18.983871 | 71 |
Majiker/BalancedMetaSoftmax-InstanceSeg | 1,005,022,361,104 | 1dff6af7757d6051d3eca0a41ee41aa4e51ab551 | f2637c2fc89ecbfa7b1f50e84293e732a4aa2656 | /projects/BALMS/balms/build.py | 75b8ea8d298b3e36dd4524bdcdf6014630c27953 | [] | permissive | https://github.com/Majiker/BalancedMetaSoftmax-InstanceSeg | d1b44e95233c2320ed41ef802d26f1a7c280308d | 64fd32e56fdef3ac382364dccd3f647b0517d771 | refs/heads/main | 2023-02-20T23:41:19.896391 | 2021-01-19T09:04:02 | 2021-01-19T09:04:02 | 307,335,464 | 14 | 3 | Apache-2.0 | false | 2021-01-15T07:28:58 | 2020-10-26T10:26:07 | 2021-01-15T05:55:08 | 2020-11-22T04:15:57 | 865 | 6 | 3 | 1 | null | false | false | from detectron2.data.build import *
import logging
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from .distributed_sampler import ClassBalancedTrainingSampler
def build_detection_meta_loader(cfg, mapper=None):
"""
build the meta set from training data with Class Balanced Sampling
"""
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
dataset = MapDataset(dataset, mapper)
logger = logging.getLogger(__name__)
logger.info("Using training sampler Class Balanced Sampler")
repeat_factors = ClassBalancedTrainingSampler.repeat_factors_by_inverse_category_frequency(dataset_dicts)
sampler = ClassBalancedTrainingSampler(repeat_factors)
return build_batch_data_loader(
dataset,
sampler,
cfg.SOLVER.IMS_PER_BATCH,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
    )
# File: Mask_Image.py (repo: SubashGupta/COVID-19-Face-Mask-Detector-with-OpenCV-and-Deep-Learning)
import cv2
from tensorflow.keras.models import load_model
from keras.preprocessing.image import load_img , img_to_array
import numpy as np
import os
import matplotlib.pyplot as plt
prototxt = r'C:\Users\saiko\Desktop\deploy.prototxt'
weights_path = r'C:\Users\saiko\Desktop\SSD.caffemodel'
net = cv2.dnn.readNet(prototxt,weights_path)
deep_model =load_model(r'C:\Users\saiko\Desktop\new_improved_model.h5')
image = cv2.imread(r'C:\Users\saiko\Desktop\doublemask.jfif')
blob = cv2.dnn.blobFromImage(image,1.0,(300,300),(104.0,177.0,123.0))
#detecting faces
net.setInput(blob)
detections = net.forward()
(h,w) = image.shape[:2]
#look over the detections
for i in range(0,detections.shape[2]):
confidence = detections[0,0,i,2]
if confidence>0.5:
# we need x,y coordinates
box = detections[0,0,i,3:7]*np.array([w,h,w,h])
(startX,startY,endX,endY) = box.astype('int')
# we need to ensure bounding boxes fall within the dimensions of the frame
(startX,startY)=(max(0,startX),max(0,startY))
(endX,endY)=(min(w-1,endX), min(h-1,endY))
face=image[startY:endY, startX:endX]
face=cv2.cvtColor(face,cv2.COLOR_BGR2RGB)
face=cv2.resize(face,(300,300))
face=img_to_array(face)
face=np.expand_dims(face,axis=0)
prediction = deep_model.predict(face)
if prediction==0:
class_label = "Mask"
color = (0,255,0)
else:
class_label = "No Mask"
color = (0,0,255)
#display the label and bounding boxes
cv2.putText(image,class_label,(startX,startY-10),cv2.FONT_HERSHEY_SIMPLEX,0.45,color,2)
cv2.rectangle(image,(startX,startY),(endX,endY),color,2)
cv2.imshow("OutPut",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# File: 2018_round1B/B.py (repo: luliu31415926/google_code_jam)
from math import *
from collections import *
from bisect import *
def can_add(r,truth,pairs):
    # check whether pairs[r] is consistent with one of the truth assignments;
    # with only one truth known, derive the complementary pair
    m,n=pairs[r]
    if len(truth)==1:
        m_,n_=truth[0]
        if m!=m_ and n!=n_:
            truth=[(m,n_),(m_,n)]
            return True
    else:
        for m_,n_ in truth:
            if m==m_ or n==n_:
                return True
    return False
def update(l,truth,pairs) :
    # remove the truth assignments that accommodated pairs[l]
    m,n=pairs[l]
    for key in truth.keys():
        pass  # loop body missing in the original source
def solve(S,signs):
pairs=[(d+a,d-b) for d,a,b in signs]
l,r=0,1
truth=[pairs[0]]
# truth: accommodate signs
max_len=1
cnt=1
while r<S:
if can_add(r,truth,pairs):
if r-l+1>max_len:
max_len=r-l+1
cnt=1
elif r-l+1==max_len:
cnt+=1
r+=1
else:
l+=1
update(l,truth,pairs)
return max_len,cnt
cases=int(input())
for i in range(cases):
S=int(input())
signs=[ tuple(map(int,input().split())) for _ in range(S)]
print ("Case #%i: %s\n" %(i+1,solve(S,signs))) | UTF-8 | Python | false | false | 1,243 | py | 58 | B.py | 55 | 0.482703 | 0.470636 | 0 | 58 | 20.431034 | 85 |
maxlampe/ODSL-Data-Science-Course | 16,149,077,050,798 | 69ff11fbe91d9a656fa55a15f5942cf41b4f6a9a | 39f542c30553f9cd459682b80992db154b0ef98d | /day6_e1.py | d09030b67771f23958c0197c9aff2eb52a98a458 | [] | no_license | https://github.com/maxlampe/ODSL-Data-Science-Course | 9a4e973697c58c81bca011b06b1199f1065fdb8b | 3af88b6bc6bbf4de6a48e581778acabbc3f52262 | refs/heads/master | 2023-03-30T20:11:40.573739 | 2021-03-31T21:10:56 | 2021-03-31T21:10:56 | 345,808,617 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Daily assignment 6_1"""
import numpy as np
import matplotlib.pyplot as plt
from mathfunctions import gaussian
def metropolis(func, params, n_trials: int = 1000, prop_rng=None, x0: float = 0.0):
"""Metropolis Algorithm"""
if prop_rng is None:
prop_rng = [-1.0, 1.0]
    x_curr = x0
    accept = []
    n_accepted = 0
    for i in range(n_trials):
        u_1 = np.random.uniform(prop_rng[0], prop_rng[1])
        u_2 = np.random.uniform()
        y_curr = x_curr + u_1
        rho = min(func(y_curr, **params) / func(x_curr, **params), 1.0)
        if u_2 <= rho:
            accept.append(y_curr)
            x_curr = y_curr
            n_accepted += 1
        else:
            accept.append(x_curr)
    accept = np.asarray(accept)
    # count only accepted proposals; the chain itself always has n_trials entries
    efficiency = n_accepted / n_trials
    print(f"# Accepted points: {100. * efficiency:0.2f}%")
    return accept, efficiency
n_sim = 100000
tests = [
[[-1.0, 1.0], 0.0],
[[-0.5, 0.5], 0.0],
[[-0.1, 0.1], 0.0],
[[-3.0, 3.0], 0.0],
[[-1.0, 1.0], 1.0],
[[-1.0, 0.5], 0.5],
]
for test in tests:
res = metropolis(
gaussian, {"mu": 0.0, "sig": 1.0}, n_sim, prop_rng=test[0], x0=test[1]
)
test.append(res[0])
test.append(res[1])
fig, axs = plt.subplots(3, 2, sharex=True, sharey=True, figsize=(13, 13))
fig.suptitle(f"Metropolis Gaussian Tests - {n_sim} Iterations")
x_vals = np.linspace(-4.5, 4.5, 1000)
bins = int(n_sim * 0.001)
for test_i, test in enumerate(tests):
axs.flat[test_i].hist(
test[2], bins=bins, range=[-4.5, 4.5], label="gen. RN", density=True
)
axs.flat[test_i].plot(x_vals, gaussian(x_vals), label="Unit. Gaussian")
axs.flat[test_i].legend()
axs.flat[test_i].set_xlabel("x [ ]")
axs.flat[test_i].set_ylabel("a.u. [ ]")
axs.flat[test_i].set_title(f"Test {test_i + 1}")
axs.flat[test_i].annotate(
f"prop. rng = {test[0]} \n" f"x0 = {test[1]}\n",
xy=(0.05, 0.95),
xycoords="axes fraction",
ha="left",
va="top",
bbox=dict(boxstyle="round", fc="1"),
)
plt.savefig("output/day6_e1.png", dpi=300)
plt.show()
# File: prj/news/models.py (repo: crazyYoda/logging)
from django.shortcuts import reverse
class New(models.Model):
title = models.CharField(max_length=64)
text = models.TextField()
date_pub = models.DateTimeField(auto_now_add=True)
slug = models.SlugField(max_length=128, unique=True)
def get_absolute_url(self):
return reverse('detail', kwargs={'slug': self.slug})
def __str__(self):
return '{}'.format(self.title)
# File: instascrape.py (repo: fopina/tgbot-buttiebot)
from __future__ import print_function
import requests
import re
import json
PICLOAD = 50
HASH = '42323d64886122307be10013ad2dcc44'
def url_and_caption(media):
try:
c = media['edge_media_to_caption']['edges'][0]['node']['text']
except KeyError:
c = ''
media['caption'] = c
return media
def strip_pics(data):
return (
[
url_and_caption(x['node'])
for x in data['edges']
],
data['page_info']['has_next_page'],
data['page_info']['end_cursor']
)
def scrape(username):
s = requests.Session()
r = s.get('https://www.instagram.com/%s/?__a=1' % username)
j = r.json()['graphql']['user']
user_id = j['id']
pics, keepgoing, cursor = strip_pics(j['edge_owner_to_timeline_media'])
for pic in pics:
yield pic
while keepgoing:
r = s.get(
'https://www.instagram.com/graphql/query/',
params={
'query_hash': HASH,
'variables': '{"id":"%s","first":%d,"after":"%s"}' % (user_id, PICLOAD, cursor)
}
)
pics, keepgoing, cursor = strip_pics(json.loads(r.text)['data']['user']['edge_owner_to_timeline_media'])
for pic in pics:
yield pic
def main(args):
for u in args:
print('Scraping %s...' % u)
for p in scrape(u):
print(' - %s' % p)
print()
if __name__ == '__main__':
import sys
main(sys.argv[1:])
# File: algo/sgd.py (repo: xiyueyiwan/DP-AGD)
import argparse
import numpy as np
from agd.common.svm import svm_grad
from agd.common.svm import svm_loss
from agd.common.svm import svm_test
from agd.common.gaussian_moments import compute_log_moment
from agd.common.gaussian_moments import get_privacy_spent
from agd.common.param import compute_advcomp_budget
from agd.common.param import compute_sigma
from agd.common.dat import load_dat
def dpsgd_ma(X, y, grad, sigma, T, step_size, batch_size, clip=4, delta=1e-8,
reg_coeff=0.0):
N, dim = X.shape
n = N * 1.0
# initialize the parameter vector
w = np.zeros(dim)
q = batch_size / n
# moments accountant
max_lmbd = 32
log_moments = []
for lmbd in xrange(1, max_lmbd+1):
log_moment = compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, _ = get_privacy_spent(log_moments, target_delta=delta)
for t in range(T):
# build a mini-batch
rand_idx = np.random.choice(N, size=batch_size, replace=False)
mini_X = X[rand_idx, :]
mini_y = y[rand_idx]
gt = grad(w, mini_X, mini_y, clip=clip)
gt += (sigma * clip) * np.random.randn(dim)
gt /= batch_size
# regularization
gt += reg_coeff * w
w -= step_size * gt
return w, eps
def dpsgd_adv(X, y, grad, eps, T, step_size, batch_size, clip=3, delta=1e-8,
reg_coeff=0.001):
N, dim = X.shape
n = N * 1.0
eps_iter, delta_iter = compute_advcomp_budget(eps, delta, T)
# initialization
w = np.zeros(dim)
q = batch_size / n
# privacy amplification by sampling
# (e, d)-DP => (2qe, d)-DP
eps_iter /= 2.0 * q
sigma = compute_sigma(eps_iter, delta_iter, 2.0*clip)
for t in range(T):
# build a mini-batch
rand_idx = np.random.choice(N, size=batch_size, replace=False)
mini_X = X[rand_idx, :]
mini_y = y[rand_idx]
gt = grad(w, mini_X, mini_y, clip=clip)
gt += sigma * np.random.randn(dim)
gt /= batch_size
gt += reg_coeff * w
w -= step_size * gt
return w
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='adaptive sgd')
parser.add_argument('dname', help='dataset name')
args = parser.parse_args()
# load the dataset
fpath = "../../../Experiment/Dataset/dat/{0}.dat".format(args.dname)
X, y = load_dat(fpath, minmax=(0, 1), normalize=False, bias_term=True)
y[y < 1] = -1
N, dim = X.shape
sigma = 4
batch_size = 1000
learning_rate = 0.05
reg_coeff = 0.001
print "SGD with moments accountant"
for T in [1, 100, 1000, 10000, 20000]:
w, eps = dpsgd_ma(X, y, svm_grad, sigma, T, learning_rate,
batch_size, reg_coeff=reg_coeff)
loss = svm_loss(w, X, y) / N
acc = svm_test(w, X, y)
print "[T={:5d}] eps: {:.5f}\tloss: {:.5f}\tacc: {:5.2f}".format(
T, eps, loss, acc*100)
print "\nSGD with advanced composition"
for eps in [0.05, 0.1, 0.2, 0.4, 0.8, 1.6]:
# used the same heuristic as in PrivGene
T = max(int(round((N * eps) / 500.0)), 1)
w = dpsgd_adv(X, y, svm_grad, eps, T, 0.1, batch_size)
loss = svm_loss(w, X, y) / N
acc = svm_test(w, X, y)
print "eps: {:4.2f}\tloss: {:.5f}\tacc: {:5.2f}".format(
eps, loss, acc*100)
# File: samples/16 IDexUnit-CFG.py (repo: gitWK86/jeb_script)
# -*- coding: utf-8 -*-
from com.pnfsoftware.jeb.client.api import IClientContext
from com.pnfsoftware.jeb.core import IRuntimeProject
from com.pnfsoftware.jeb.core.units import IUnit
from com.pnfsoftware.jeb.core.units.code.android import IDexUnit
from com.pnfsoftware.jeb.core.units.code.android.dex import IDexMethodData, IDexCodeItem, IDexMethod
# Access the control flow graph (CFG)
def Test(ctx):
assert isinstance(ctx,IClientContext)
input_path = r"D:\tmp\2\project\about_dex_diff\code\jsq\jsq.dex"
sign = "Lnet/cavas/show/aa;->compare(Ljava/lang/Object;Ljava/lang/Object;)I"
unit = ctx.open(input_path); assert isinstance(unit,IUnit)
prj = ctx.getMainProject(); assert isinstance(prj,IRuntimeProject)
dexUnit = prj.findUnit(IDexUnit); assert isinstance(dexUnit,IDexUnit);
method = dexUnit.getMethod(sign); assert isinstance(method,IDexMethod)
dexMethodData = method.getData(); assert isinstance(dexMethodData,IDexMethodData)
dexCodeItem= dexMethodData.getCodeItem(); assert isinstance(dexCodeItem,IDexCodeItem)
    # Control flow graph
    print("-------------------------------------")
    cfg = dexCodeItem.getControlFlowGraph()
    print "01 Block >>> ",cfg.getBlocks() # list of basic blocks
    print "02 size >>> ",cfg.size() # number of blocks
    print "03 hasExit >>> ",cfg.hasExit() # whether the graph has an exit
    print "04 getEntryBlock >>> ",cfg.getEntryBlock() # entry block
    print "05 getExitBlocks >>> ",cfg.getExitBlocks() # exit blocks (not necessarily unique)
    print "06 getLast >>> ",cfg.getLast() # last block
    print "07 getAddressBlockMap >>> ",cfg.getAddressBlockMap() # map<offset address, block>
    print "08 getEndAddress >>> ",hex(cfg.getEndAddress()) # address of the end instruction
    print "09 formatEdges >>> ",cfg.formatEdges() # edges as a formatted string
    # print " >>> ",cfg.doDataFlowAnalysis() # run data flow analysis
# print " >>> ",cfg.getUseDefChains() # UD
# print " >>> ",cfg.getDefUseChains() # DU
# print " >>> ",cfg.getFullDefUseChains() # FDU
# print " >>> ",cfg.getFullUseDefChains() # FUD
    # Output
# 01 Block >>> [(0-10,5), (14-14,1), (16-16,1), (18-20,3), (24-26,2), (28-2A,2)]
# 02 size >>> 6
# 03 hasExit >>> True
# 04 getEntryBlock >>> (0-10,5)
# 05 getExitBlocks >>> [(16-16,1)]
# 06 getLast >>> (28-2A,2)
# 07 getAddressBlockMap >>> {0L: (0-10,5), 20L: (14-14,1), 22L: (16-16,1), 24L: (18-20,3), 36L: (24-26,2), 40L: (28-2A,2)}
# 08 getEndAddress >>> 0x2cL
# 09 formatEdges >>> (EDGES: 0->14, 0->18, 14->16, 18->24, 18->28, 24->16, 28->16)
# Done.
    # Method instructions
# .method public final volatile bridge synthetic compare(Object, Object)I
# .registers 5
# 00000000 check-cast p1, b
# 00000004 check-cast p2, b
# 00000008 iget v0, p1, b->o:I
# 0000000C iget v1, p2, b->o:I
# 00000010 if-ge v0, v1, :18
# :14
# 00000014 const/4 v0, 1
# :16
# 00000016 return v0
# :18
# 00000018 iget v0, p1, b->o:I
# 0000001C iget v1, p2, b->o:I
    # 00000020 if-le v0, v1, :28
# :24
# 00000024 const/4 v0, -1
# 00000026 goto :16
# :28
# 00000028 const/4 v0, 0
# 0000002A goto :16
    # .end method
# File: util/getUserInfoUtil.py (repo: Cassie-1/HtestApi_git)
import requests,json
proxies = {'http':'http://localhost:8888'}  # proxy
headers={}
headers['Content-Type']='application/json;charset=UTF-8'
http=requests.session()  # get a session object reused for the whole flow
resp=http.post(url="http://192.168.1.203:8083/sys/login",
proxies=proxies,
headers=headers,
data='{"userName":"18210034706","password":"123456"}')
resp_dict = json.loads(resp.text)  # parse the JSON response into a Python dict
token = resp_dict['object']['token']
headers['token']=token
data={'token':token}
data_json=json.dumps(data)  # serialize the Python dict to JSON
resp2=http.post(url="http://192.168.1.203:8083/sys/getUserInfo",
proxies=proxies,
headers=headers,
data=data_json
)
print(resp2.text)
print(resp2.url)
print(resp2.cookies)
print(resp2.headers)
print('http code:%s'%resp2.status_code)
# File: pool.py (repo: Reno-Greenleaf/tomb)
from actor import Actor, Location, Passage, Switch, Ghost
from json import load
class Pool(dict):
""" Contains ingame objects. """
def fill(self):
with open('data/actors.json', 'r') as data:
actors = load(data)
for name, properties in actors.items():
self._build(properties, name)
with open('data/space.json', 'r') as data:
self.space = load(data)
def get_rooms(self):
return self.space
def _build(self, properties, name):
actor = Actor()
actor.load(properties)
if 'io' not in properties:
self[name] = actor
return
if 'labyrinth' in properties:
actor = Location(actor)
if 'labyrinth' in properties and 'right' in properties['labyrinth']:
actor = Passage(actor)
if 'access' in properties and 'used' in properties['access']:
actor = Switch(actor)
elif 'access' in properties:
actor = Ghost(actor)
        self[name] = actor
# File: setup.py (repo: JakobTheDev/vs-code-remote-dev-demo)
#####################
# IMPORTS
#####################
from setuptools import setup, find_packages
#####################
# SETUP
#####################
def dependencies(imported_file):
with open(imported_file) as file:
return file.read().splitlines()
with open("README.md") as file:
# PROVISION
setup(
name="demo",
url="https://jakob.pennington.io",
author="Jakob Pennington",
author_email="jakob@pennington.io",
version='1.0.0',
description="Runs sslscan as a demo for Kali development containers.",
packages=find_packages(),
entry_points={'console_scripts': ['demo = demo.demo:main']}
)
# File: codes/CodeJamCrawler/16_0_2_neat/16_0_2_blankverse_B.py (repo: DaHuO/Supergraph)
def solve(s):
c = 0
N = len(s)
for i in xrange(1,N):
if s[i] != s[i - 1]:
c += 1
if s[N - 1] == '-':
c += 1
return c
T = int(input())
for i in xrange(T):
s = raw_input()
print "Case #%d: %d" %(i + 1, solve(s))
# File: bin/postgres_wait.sh (repo: disarticulate/osm2pgsqlauto)
import os
import time
import psycopg2
connection = "host='{POSTGRES_DB_HOST}' dbname='{POSTGRES_DB}' user='{POSTGRES_USER}' password='{POSTGRES_PASSWORD}'"
connection = connection.format(**os.environ)
while True:
try:
conn = psycopg2.connect(connection)
conn.close()
break
except psycopg2.OperationalError as ex:
print(f"Postgres at not available at {connection} failed: {ex}")
        time.sleep(5)
# File: xai/brain/wordbase/verbs/_staunch.py (repo: cash2one/xai)
#calss header
class _STAUNCH():
def __init__(self,):
self.name = "STAUNCH"
self.definitions = [u'to stop something happening, or to stop liquid, especially blood, from flowing out: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
# File: autocolorize_resnet.py (repo: mingo-x/colorfromlanguage)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
from collections import defaultdict, Counter
import cPickle as pickle
import h5py as h5
import json
import numpy as np
import cv2
import string
import time
import random
import os, sys
import argparse
import scipy.ndimage.interpolation as sni
from skimage import io, color
from random import shuffle
from itertools import izip
import utils
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
def get_caption_encoder(ver, *args):
if 'lstm' in ver:
return CaptionEncoderLSTM(*args)
elif 'gru' in ver:
return CaptionEncoderGRU(*args)
# standard bilstm
class CaptionEncoderLSTM(nn.Module):
def __init__(self, word_embedding_dim, hidden_dim, vocab_size, train_vocab_embeddings, dropout=0.2, emb_freeze=True):
super(CaptionEncoderLSTM, self).__init__()
self.embedding = nn.Embedding(vocab_size, word_embedding_dim)
self.embedding.weight.data.copy_(torch.from_numpy(train_vocab_embeddings))
self.embedding.weight.requires_grad = (not emb_freeze)
self.hidden_size = hidden_dim / 2
self.lstm = nn.LSTM(word_embedding_dim, self.hidden_size, num_layers=1,
bidirectional=True, batch_first=True)
self.dropout = nn.Dropout(p=dropout)
utils.init_modules([self.lstm])
print('LSTM caption encoder, dropout {}, embedding frozen {}'.format(dropout, emb_freeze))
def forward(self, captions, lens):
bsz, max_len = captions.size()
embeds = self.dropout(self.embedding(captions))
lens, indices = torch.sort(lens, 0, True)
_, (enc_hids, _) = self.lstm(pack(embeds[indices], lens.tolist(), batch_first=True))
enc_hids = torch.cat((enc_hids[0], enc_hids[1]), 1)
_, _indices = torch.sort(indices, 0)
enc_hids = enc_hids[_indices]
return enc_hids
class CaptionEncoderGRU(nn.Module):
def __init__(self, word_embedding_dim, hidden_dim, vocab_size, train_vocab_embeddings, dropout=0.2, emb_freeze=True):
super(CaptionEncoderGRU, self).__init__()
self.embedding = nn.Embedding(vocab_size, word_embedding_dim)
self.embedding.weight.data.copy_(torch.from_numpy(train_vocab_embeddings))
self.embedding.weight.requires_grad = (not emb_freeze)
self.hidden_size = hidden_dim / 2
self.gru = nn.GRU(
word_embedding_dim,
self.hidden_size,
num_layers=1,
bidirectional=True,
batch_first=True)
self.dropout = nn.Dropout(p=dropout)
utils.init_modules([self.gru])
print('GRU caption encoder, dropout {}, embedding frozen {}'.format(dropout, emb_freeze))
def forward(self, captions, lens):
bsz, max_len = captions.size()
embeds = self.dropout(self.embedding(captions))
lens, indices = torch.sort(lens, 0, True)
_, enc_hids = self.gru(pack(embeds[indices], lens.tolist(), batch_first=True))
enc_hids = torch.cat((enc_hids[0], enc_hids[1]), 1)
_, _indices = torch.sort(indices, 0)
enc_hids = enc_hids[_indices]
return enc_hids
class FiLM(nn.Module):
"""
A Feature-wise Linear Modulation Layer from
'FiLM: Visual Reasoning with a General Conditioning Layer'
How this layer works :
x = Variable(torch.randn(2, 64, 32 ,32))
gammas = Variable(torch.randn(2, 64)) # gammas and betas have to be 64
betas = Variable(torch.randn(2, 64))
y = film(x, gammas, betas)
print y.size()
y is : [2, 64, 32, 32]
"""
def forward(self, x, gammas, betas):
gammas = gammas.unsqueeze(2).unsqueeze(3).expand_as(x)
betas = betas.unsqueeze(2).unsqueeze(3).expand_as(x)
return (gammas * x) + betas
class FiLMV1(nn.Module):
def forward(self, x, gammas, betas):
gammas = gammas.unsqueeze(2).unsqueeze(3).expand_as(x)
betas = betas.unsqueeze(2).unsqueeze(3).expand_as(x)
return (gammas + 1) * x + betas
class FiLMWithAttn(nn.Module):
def forward(self, x, gammas, betas, sa):
gammas = gammas.unsqueeze(2).unsqueeze(3).expand_as(x)
betas = betas.unsqueeze(2).unsqueeze(3).expand_as(x)
return ((gammas + sa) * x) + betas
class FilMedResBlock(nn.Module):
expansion = 1
'''
A much simplified version
'''
def __init__(self, in_dim, out_dim, stride=1, padding=1, dilation=1):
super(FilMedResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_dim, in_dim, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1,
dilation=dilation) # bias=False? check what perez did
self.bn2 = nn.BatchNorm2d(out_dim)
self.film = FiLM()
        utils.init_modules(self.modules())  # match the utils helper used by the caption encoders
def forward(self, x, gammas, betas):
out = x
out = F.relu(self.conv1(out))
out = self.bn2(F.relu(self.conv2(out)))
out = F.relu(self.film(out, gammas, betas))
out += x
return out
class AutocolorizeResnet(nn.Module):
def __init__(self, vocab_size, feature_dim=(512, 28, 28), d_hid=256, d_emb=300, num_modules=4, num_classes=625, train_vocab_embeddings=None):
super(AutocolorizeResnet, self).__init__()
self.num_modules = num_modules
self.n_lstm_hidden = d_hid
self.block = FilMedResBlock
self.in_dim = feature_dim[0]
self.num_classes = num_classes
dilations = [1, 1, 1, 1]
        self.caption_encoder = CaptionEncoderLSTM(d_emb, d_hid, vocab_size, train_vocab_embeddings)
# self.function_modules = {}
# for fn_num in range(self.num_modules):
# self.add_module(str(fn_num), mod)
# self.function_modules[fn_num] = mod
        self.mod1 = self.block(self.in_dim, self.in_dim, dilation=dilations[0])
        self.mod2 = self.block(self.in_dim, self.in_dim, dilation=dilations[1])
        self.mod3 = self.block(self.in_dim, self.in_dim, dilation=dilations[2])
        self.mod4 = self.block(self.in_dim, self.in_dim, dilation=dilations[3])
# put this in a loop later # there's an *2 because of bilstm and because of film
self.dense_film_1 = nn.Linear(self.n_lstm_hidden * 2, self.in_dim * 2)
self.dense_film_2 = nn.Linear(self.n_lstm_hidden * 2, self.in_dim * 2)
self.dense_film_3 = nn.Linear(self.n_lstm_hidden * 2, self.in_dim * 2)
# out = x # 2x512x28x28
# out = F.relu(self.conv1(out)) # 2x512x28x28
self.dense_film_4 = nn.Linear(self.n_lstm_hidden * 2, self.in_dim * 2)
print(self.dense_film_4.weight.is_cuda)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
self.classifier = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, dilation=1)
def forward(self, x, captions, caption_lens):
caption_features = self.caption_encoder(captions, caption_lens)
# print(caption_features.is_cuda)
# out = F.relu(self.bn1(self.conv1(x)))
# print(self.dense_film_1.weight.is_cuda)
dense_film_1 = self.dense_film_1(caption_features)
dense_film_2 = self.dense_film_2(caption_features)
dense_film_3 = self.dense_film_3(caption_features)
dense_film_4 = self.dense_film_4(caption_features) # bsz * 128
gammas1, betas1 = torch.split(dense_film_1, self.in_dim, dim=-1)
gammas2, betas2 = torch.split(dense_film_2, self.in_dim, dim=-1)
gammas3, betas3 = torch.split(dense_film_3, self.in_dim, dim=-1)
gammas4, betas4 = torch.split(dense_film_4, self.in_dim, dim=-1)
out = self.mod1(x, gammas1, betas1) # out is 2x512x28x28
out = self.mod2(out, gammas2, betas2) # out is 2x512x28x28
out = self.mod3(out, gammas3, betas3)
out_last = self.mod4(out, gammas4, betas4)
out = self.upsample(out_last)
out = self.classifier(out)
out = out.permute(0, 2, 3, 1).contiguous()
out = out.view(-1, self.num_classes)
return out, out_last
def train(minibatches, net, optimizer, epoch, prior_probs, img_save_folder):
stime = time.time()
for i, (batch_start, batch_end) in enumerate(minibatches):
img_bgrs = train_origs[batch_start:batch_end]
img_labs = np.array([cvbgr2lab(img_bgr) for img_bgr in img_bgrs])
if args.vgg:
img_ls = img_labs[:, :, :, 0: 1] / 50. - 1. # [-1, 1]
input_ = torch.from_numpy(np.transpose(img_ls, (0, 3, 1, 2)))
else:
input_ = torch.from_numpy(train_ims[batch_start:batch_end])
target = torch.from_numpy(lookup_enc.encode_points(img_labs[:, ::4, ::4, 1:]))
# rand_idx = np.random.randint(5) # 5 captions per batch
input_captions_ = train_words[batch_start:batch_end]
input_lengths_ = train_lengths[batch_start:batch_end]
# for now just choose first caption
input_captions = Variable(torch.from_numpy(
input_captions_.astype('int32')).long().cuda())
input_caption_lens = torch.from_numpy(
input_lengths_.astype('int32')).long().cuda()
input_ims = Variable(input_.float().cuda())
target = Variable(target.long()).cuda()
optimizer.zero_grad()
output, _ = net(input_ims, input_captions, input_caption_lens)
loss = loss_function(output, target.view(-1))
loss.backward()
optimizer.step()
if i % 50 == 0:
print 'loss at epoch %d, batch %d / %d = %f, time: %f s' % \
(epoch, i, len(minibatches), loss.data[0], time.time() - stime)
stime = time.time()
if True: # args.logs:
# softmax output and multiply by grid
dec_inp = nn.Softmax()(output) # 12544x625
AB_vals = dec_inp.mm(cuda_cc) # 12544x2
# reshape and select last image of batch]
AB_vals = AB_vals.view(len(img_labs), 56, 56, 2)[-1].data.cpu().numpy()[None, :, :, :]
AB_vals = cv2.resize(AB_vals[0], (224, 224),
interpolation=cv2.INTER_CUBIC)
img_dec = labim2bgr(np.dstack((np.expand_dims(img_labs[-1, :, :, 0], axis=2), AB_vals)))
# img_labs_tosave = labim2rgb(img_labs[-1])
word_list = list(input_captions_[-1, :input_lengths_[-1]])
words = '_'.join(vrev.get(w, 'unk') for w in word_list)
cv2.imwrite('%s/%d_%d_bw.jpg' % (img_save_folder, epoch, i),
cv2.cvtColor(img_bgrs[-1].astype('uint8'),
cv2.COLOR_BGR2GRAY))
cv2.imwrite('%s/%d_%d_color.jpg' % (img_save_folder, epoch, i),
img_bgrs[-1].astype('uint8'))
cv2.imwrite('%s/%d_%d_rec_%s.jpg' % (img_save_folder, epoch, i, words),
img_dec.astype('uint8'))
if i == 0:
torch.save({
'epoch': epoch + 1,
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'loss': loss.data[0],
}, args.model_save_file + '_' + str(epoch) + '_' + str(i) + '.pth.tar')
return net
def scale_attention_map(x):
x = (x - np.min(x)) / (np.max(x) - np.min(x))
y = x * 255.
y = cv2.cvtColor(y.astype('uint8'), cv2.COLOR_GRAY2RGB).astype('uint8')
y = cv2.applyColorMap(y, cv2.COLORMAP_JET)
return cv2.resize(y, (224, 224), interpolation=cv2.INTER_LANCZOS4)
def evaluate_attention_maps(minibatches, net, epoch, img_save_folder, save_every=20):
stime = time.time()
c = Counter()
val_full_loss = 0.
val_masked_loss = 0.
val_loss = 0.
n_val_ims = 0
for i, (batch_start, batch_end) in enumerate(val_minibatches):
img_rgbs = val_origs[batch_start:batch_end]
img_labs = np.array([cvrgb2lab(img_rgb) for img_rgb in img_rgbs])
input_ = torch.from_numpy(val_ims[batch_start:batch_end])
gt_abs = img_labs[:, ::4, ::4, 1:]
target = torch.from_numpy(lookup_enc.encode_points(gt_abs))
input_captions_ = val_words[batch_start:batch_end]
input_lengths_ = val_lengths[batch_start:batch_end]
input_captions = Variable(torch.from_numpy(\
input_captions_.astype('int32')).long().cuda())
input_caption_lens = torch.from_numpy(\
input_lengths_.astype('int32')).long().cuda()
input_ims = Variable(input_.float().cuda())
target = Variable(target.long()).cuda()
output, output_maps = net(input_ims, input_captions, input_caption_lens)
# softmax output and multiply by grid
dec_inp = nn.Softmax()(output) # 12544x625
AB_vals = dec_inp.mm(cuda_cc) # 12544x2
# reshape and select last image of batch]
AB_vals = AB_vals.view(len(img_labs), 56, 56, 2).data.cpu().numpy()
n_val_ims += len(AB_vals)
for k, (img_rgb, AB_val) in enumerate(zip(img_rgbs, AB_vals)):
# attention stuff
AB_val = cv2.resize(AB_val, (224, 224),
interpolation=cv2.INTER_CUBIC)
img_dec = labim2rgb(np.dstack((np.expand_dims(img_labs[k, :, :, 0], axis=2), AB_val)))
val_loss += error_metric(img_dec, img_rgb)
if k == 0 and i%save_every == 0:
output_maps = torch.mean(output_maps, dim=1)
output_maps = output_maps.data.cpu().numpy()
output_maps = scale_attention_map(output_maps[k])
word_list = list(input_captions_[k, :input_lengths_[k]])
words = '_'.join(vrev.get(w, 'unk') for w in word_list)
img_labs_tosave = labim2rgb(img_labs[k])
cv2.imwrite('%s/%d_%d_bw.jpg'%(img_save_folder, epoch, i),
cv2.cvtColor(img_rgbs[k].astype('uint8'),
cv2.COLOR_RGB2GRAY))
cv2.imwrite('%s/%d_%d_color.jpg'%(img_save_folder, epoch, i),
img_rgbs[k].astype('uint8'))
cv2.imwrite('%s/%d_%d_rec_%s.jpg'%(img_save_folder, epoch, i, words),
img_dec.astype('uint8'))
cv2.imwrite('%s/%d_%d_att.jpg'%(img_save_folder, epoch, i), output_maps)
return val_loss / len(val_minibatches) # , val_masked_loss / len(val_minibatches)
def evaluate(minibatches, net, epoch, img_save_folder, save_every=20):
stime = time.time()
val_loss = 0.
n_val_ims = 0
for i, (batch_start, batch_end) in enumerate(val_minibatches):
img_bgrs = val_origs[batch_start:batch_end]
img_labs = np.array([cvbgr2lab(img_bgr) for img_bgr in img_bgrs])
if args.vgg:
img_ls = img_labs[:, :, :, 0: 1] / 50. - 1.
input_ = torch.from_numpy(np.transpose(img_ls, (0, 3, 1, 2)))
else:
input_ = torch.from_numpy(val_ims[batch_start:batch_end])
gt_abs = img_labs[:, ::4, ::4, 1:]
target = torch.from_numpy(lookup_enc.encode_points(gt_abs))
input_captions_ = val_words[batch_start:batch_end]
input_lengths_ = val_lengths[batch_start:batch_end]
input_captions = Variable(torch.from_numpy(input_captions_.astype('int32')).long().cuda())
input_caption_lens = torch.from_numpy(input_lengths_.astype('int32')).long().cuda()
input_ims = Variable(input_.float().cuda())
target = Variable(target.long()).cuda()
output, _ = net(input_ims, input_captions, input_caption_lens)
# softmax output and multiply by grid
dec_inp = nn.Softmax()(output)
AB_vals = dec_inp.mm(cuda_cc)
# reshape and select last image of batch]
AB_vals = AB_vals.view(len(img_labs), 56, 56, 2).data.cpu().numpy()
n_val_ims += len(AB_vals)
for k, (img_bgr, AB_val) in enumerate(zip(img_bgrs, AB_vals)):
AB_val = cv2.resize(AB_val, (224, 224), interpolation=cv2.INTER_CUBIC)
img_dec = labim2bgr(np.dstack((np.expand_dims(img_labs[k, :, :, 0], axis=2), AB_val)))
val_loss += error_metric(img_dec, img_bgr)
if k == 0 and i % save_every == 0:
word_list = list(input_captions_[k, :input_lengths_[k]])
words = '_'.join(vrev.get(w, 'unk') for w in word_list)
cv2.imwrite('%s/%d_%d_bw.jpg' % (img_save_folder, epoch, i),
cv2.cvtColor(img_bgrs[k].astype('uint8'),
cv2.COLOR_BGR2GRAY))
cv2.imwrite('%s/%d_%d_color.jpg' % (img_save_folder, epoch, i),
img_bgrs[k].astype('uint8'))
cv2.imwrite('%s/%d_%d_rec_%s.jpg' % (img_save_folder, epoch, i, words),
img_dec.astype('uint8'))
print("Eval {0} time {1}".format(epoch, time.time() - stime))
return val_loss / len(val_minibatches) # , val_masked_loss / len(val_minibatches)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='resnet coco colorization')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--start-epoch', '-se', type=int, default=0, help='starting epoch')
parser.add_argument('--end-epoch', '-ee', type=int, default=30, help='ending epoch')
parser.add_argument('--gpuid', '-g', default='0', type=str, help='which gpu to use')
parser.add_argument('--batch_size', '-b', default=24, type=int, help='batch size')
parser.add_argument('--d_emb', default=300, type=int, help='word-embedding dimension')
parser.add_argument('--d_hid', default=150, type=float, help='lstm hidden dimension')
parser.add_argument('--h5_file', help='h5 file which contains everything except features')
parser.add_argument('--features_file', help='h5 file which contains features')
parser.add_argument('--vocab_file_name', default='./priors/coco_colors_vocab.p', help='vocabulary file')
parser.add_argument('--image_save_folder', help='prefix of the folders where images are stored')
parser.add_argument('--model_save_file', help='prefix of the model save file')
parser.add_argument('--save_attention_maps', default=0, help='save maps as well')
parser.add_argument('--vgg', default=False, type=bool, help='Use VGG architecture.')
parser.add_argument('--weights', default='', type=str, help='Pretrained weights.')
parser.add_argument('--grid_file', default='./priors/full_lab_grid_10.npy', type=str, help='Grid file.')
parser.add_argument('--prior_file', default='./priors/coco_priors_onehot_625.npy', type=str, help='Priors file.')
parser.add_argument('--nclasses', '-nc', type=int, default=625, help='Number of classes.')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid
train_vocab = pickle.load(open(args.vocab_file_name, 'r'))
train_vocab_embeddings = pickle.load(open('/srv/glusterfs/xieya/data/w2v_embeddings_colors.p', 'r'))
if args.vgg:
print("Using VGG structure.")
else:
print("Using ResNet structure.")
# seeds
# torch.manual_seed(1000)
# torch.cuda.manual_seed(1000)
# random.seed(1000)
# np.random.seed(1000)
# initialize quantized LAB encoder
lookup_enc = LookupEncode(args.grid_file)
# num_classes = lookup_enc.cc.shape[0]
cuda_cc = Variable(torch.from_numpy(lookup_enc.cc).float().cuda())
hfile = args.h5_file
hf = h5.File(hfile, 'r')
features_file = args.features_file
ff = h5.File(features_file, 'r')
# color rebalancing
alpha = 1.
gamma = 0.5
gradient_prior_factor = Variable(torch.from_numpy(
prior_boosting(args.prior_file, alpha, gamma)).float().cuda())
print 'rebalancing'
loss_function = nn.CrossEntropyLoss(weight=gradient_prior_factor)
vrev = dict((v, k) for (k, v) in train_vocab.iteritems())
n_vocab = len(train_vocab)
if args.vgg:
net = AutocolorizeVGG(n_vocab, train_vocab_embeddings=train_vocab_embeddings, num_classes=args.nclasses)
if args.weights != '':
# Load pretrained weights.
if os.path.isfile(args.weights):
print("=> loading pretrained weights '{}'".format(args.weights))
weights = torch.load(args.weights)
net.load_state_dict(weights['state_dict'])
else:
print("=> no weights found at '{}'".format(args.weights))
else:
net = AutocolorizeResnet(n_vocab, train_vocab_embeddings=train_vocab_embeddings) # leave other stuff at default values
net.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, weight_decay=(1e-3 if args.vgg else 0))
train_origs = hf['train_ims']
train_ims = ff['train_features']
train_words = hf['train_words']
train_lengths = hf['train_length']
assert len(train_origs) == len(train_ims)
val_origs = hf['val_ims']
val_ims = ff['val_features']
val_words = hf['val_words']
val_lengths = hf['val_length']
assert len(val_origs) == len(val_ims)
n_train_ims = len(train_origs)
minibatches = produce_minibatch_idxs(n_train_ims, args.batch_size)[:-1]
n_val_ims = len(val_origs)
val_minibatches = produce_minibatch_idxs(n_val_ims, args.batch_size)[:-1]
val_img_save_folder = args.image_save_folder + '_val'
if not os.path.exists(val_img_save_folder):
os.makedirs(val_img_save_folder)
img_save_folder = args.image_save_folder + '_train'
if not os.path.exists(img_save_folder):
os.makedirs(img_save_folder)
print 'start training ....'
for epoch in range(args.start_epoch, args.end_epoch):
random.shuffle(minibatches)
random.shuffle(val_minibatches)
net = train(minibatches, net, optimizer, epoch, gradient_prior_factor, img_save_folder)
t = time.time()
if args.save_attention_maps == 0:
val_full_loss = evaluate(val_minibatches, net, epoch, val_img_save_folder)
else:
val_full_loss = evaluate_attention_maps(val_minibatches, net, epoch, val_img_save_folder)
print 'full image rmse: %f' % (val_full_loss)
| UTF-8 | Python | false | false | 23,475 | py | 9 | autocolorize_resnet.py | 2 | 0.574015 | 0.554079 | 0 | 523 | 43.885277 | 145 |
alerin345/Instagram | 4,784,593,592,612 | d9313e52d0624bdd4d640a0083e0b03682f7ab1d | ea2cdb09ca80f06c874741f07926954547c10b5c | /users/models.py | 0d19652cfec3f8c893f93f2fc373a25165dd5588 | [
"MIT"
] | permissive | https://github.com/alerin345/Instagram | 7d34af6f047ee3d9f6ae7afb04dc38c9343a0960 | 082e4a64042ae94f3eacfc10144f925e3dfc2492 | refs/heads/master | 2023-02-19T15:12:44.883976 | 2021-01-06T23:20:23 | 2021-01-06T23:20:23 | 323,971,176 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)
picture = models.ImageField(default="default.png",null=True, blank=True)
description = models.TextField(default="",blank=True)
class Image(models.Model):
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
picture = models.ImageField(null=True)
description = models.TextField(default="",blank=True)
likes = models.IntegerField(default=0)
comments = models.IntegerField(default=0)
date = models.DateTimeField(default=timezone.now)
class Like(models.Model):
image = models.ForeignKey(Image, null=True, on_delete=models.CASCADE)
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
class Meta:
constraints = [
models.UniqueConstraint(fields=['image', 'user'], name='unique likes')
]
class Comment(models.Model):
image = models.ForeignKey(Image, null=True, on_delete=models.CASCADE)
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
value = models.TextField(blank=False)
date = models.DateTimeField(default=timezone.now)
class Subscription(models.Model):
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="user")
userSubscribed = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="userSubscribed")
class Meta:
constraints = [
models.UniqueConstraint(fields=['user', 'userSubscribed'], name='unique subscribes')
]
| UTF-8 | Python | false | false | 1,684 | py | 31 | models.py | 21 | 0.720309 | 0.719121 | 0 | 39 | 42.179487 | 112 |
ElviraUz/arena | 14,998,025,824,065 | 71da50a3b8aa4f8ca48695116fc5f7b136886c16 | 13bbfb9a36911675a8c12f2b20a05480f6088411 | /load_data.py | 02b18aeff52b2380b43e4b043992ad6b7acf630e | [] | no_license | https://github.com/ElviraUz/arena | a0f689462fc034ddb1fd3a95bbf72f9e85147901 | 31814014f8be6b9d2509942193029978a69b1dd4 | refs/heads/master | 2022-11-17T08:15:11.151077 | 2020-07-12T08:24:26 | 2020-07-12T08:24:26 | 271,096,899 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from webapp import create_app
from webapp.model import db, Arenas
from webapp import db, Arenas
import json
app = create_app()
db.init_app(app)
with open("data_file.json", "rb") as read_file:
data = json.load(read_file)
properties = data['features']
def import_data(name, adress, website, phones, hours24, description, image,
image2, image3, metro, everyday, vk, instagram, twich):
arena = Arenas(name=name,
adress=adress,
website=website,
phones=phones,
hours24=hours24,
description="Test Descrption",
image="image.jpg",
image2="image.jpg",
image3="image.jpg",
metro="Киберспортивная",
everyday=everyday,
vk=vk,
instagram=instagram,
twich=twich)
db.session.add(arena)
db.session.commit()
def get_phones(prop):
phones = prop.get("properties", {}).get("CompanyMetaData", {}).get("Phones", {})
if isinstance(phones, list):
return ", ".join([phone.get("formatted") for phone in phones])
def get_is_24(prop):
hours24 = prop.get("properties", {}).get("CompanyMetaData", {}).get("Hours", {}).get('Availabilities',{})
if isinstance(hours24, list):
return hours24[0].get("TwentyFourHours", False)
return False
def get_everyday(prop):
everyday = prop.get("properties", {}).get("CompanyMetaData", {}).get("Hours", {}).get('Availabilities',{})
if isinstance(everyday, list):
return everyday[0].get("Everyday", False)
return False
with app.app_context():
for prop in properties:
name = str(prop.get("properties", {}).get("CompanyMetaData", {}).get("name", {}))
adress = str(prop.get("properties", {}).get("description", {}))
website = str(prop.get("properties", {}).get("CompanyMetaData", {}).get('url'))
phones = str(get_phones(prop))
hours24 = get_is_24(prop)
everyday = get_everyday(prop)
import_data(name=name,
adress=adress,
website=website,
phones=phones,
hours24=hours24,
description="Test Descrption",
image="image.jpg",
image2="image.jpg",
image3="image.jpg",
metro="Metro",
everyday=everyday,
vk="vk.com",
instagram="instagram.com",
twich="twich.com")
# цикл in обходит файл с импортированными аренами и записывает их в базу данных
| UTF-8 | Python | false | false | 2,697 | py | 13 | load_data.py | 6 | 0.559374 | 0.547919 | 0 | 79 | 32.151899 | 110 |
rubenCumbreno/startmeapp-hackaton | 7,164,005,471,955 | 4a52a0cf9486bb1ac510cef95e26c0a44ac2e910 | dae30b231bf194c9760979ed17afc5933eb7bae8 | /sentiment_analist/sentiment_analysis.py | f4c8796ae2e7c57762ff24bb5188a6baf6b5d22c | [] | no_license | https://github.com/rubenCumbreno/startmeapp-hackaton | 07ed109ee744c7064428672b7691a43b5cad23c6 | 47d8735154ccc0934229370fa2f90f3a551f5d42 | refs/heads/master | 2020-04-11T19:04:20.633816 | 2018-12-16T16:47:37 | 2018-12-16T16:47:37 | 162,021,083 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#Se importa TextBlob
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import logging #libreria para el logging
import os
import sys
import time
class SentimenAnalysis(object):
def __init__(self):
self.logger = logging.getLogger('main')
self.config_logging()
def config_logging(self):
self.logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s [%(threadName)s %(module)s %(funcName)s line:%(lineno)s] %(levelname)s: %(message)s',
'%Y-%m-%d %H:%M:%S')
log_file_name = 'sentiment.log'
handler = logging.FileHandler(os.path.join('./logs', log_file_name))
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.DEBUG)
self.logger.handlers = []
self.logger.addHandler(handler)
self.logger.addHandler(console)
def main(self):
print("Probando sentiment analysis entre 0-1")
texto = 'No tengo amigos'
print(self.sentiment_analysis(texto))
def sentiment_analysis(self, texto):
analisis = TextBlob(texto)
#idioma = analisis.detect_language()
# meter libreria para faltas ortografia
traduccion = analisis.translate(to='en')
analyzer = SentimentIntensityAnalyzer()
vs = analyzer.polarity_scores(traduccion)
dic = {'mal': -3, 'pegar':-3, 'amenazar':-3, 'no':-3, 'amigos':-3}
valor = 0
words = texto.lower().split(' ')
for word in words:
if word in dic:
if word == 'amigos':
if 'no' in words:
valor+= dic[word]
continue
valor += dic[word]
return vs
if __name__ == '__main__':
sentiment = SentimenAnalysis()
sentiment.main() | UTF-8 | Python | false | false | 1,803 | py | 4 | sentiment_analysis.py | 3 | 0.660566 | 0.655574 | 0 | 72 | 23.069444 | 101 |
MalSemik/nauka-5.0 | 4,793,183,514,089 | a54fe577f01c26e24c3b141f260281ba98a1658b | d764722e49a0394a4ca5688988f98a31fa3d98a7 | /regular_expressions.py | 04fae656bf72406bc375cad4fae3a1e4f2df2ff0 | [] | no_license | https://github.com/MalSemik/nauka-5.0 | 5744e8853beb517dfac4d19f415b757197d1d0e5 | 13b825ef2994b8c3155da7d9513eadcf0f5e66d6 | refs/heads/master | 2020-05-04T23:36:49.985653 | 2019-10-23T16:44:23 | 2019-10-23T16:44:23 | 179,549,485 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
date_pattern = r"[0123]?\d[-./]\d{1,2}[-./]\d{1,4}"
example_date_str = "01.22.1995"
bad_date_str = "01.33/dupa"
if re.match(date_pattern, example_date_str):
print("Matches")
if re.match(date_pattern,bad_date_str):
print(f"{bad_date_str} matches pattern {date_pattern}")
else:
print(f"{bad_date_str} doesn't match pattern {date_pattern}")
print([letter for letter in 'ale jajca panie ferdku'])
print(re.search("(a)(b)(c)","abc").groups())
print(re.sub("(.*)=(.*)",r"\2=\1","dupa=10")) | UTF-8 | Python | false | false | 511 | py | 48 | regular_expressions.py | 41 | 0.630137 | 0.58317 | 0 | 19 | 25.947368 | 65 |
dl0312/PS | 13,804,024,896,648 | 0e0229a7c260eb05f8f9ea5d0798418bb53deceb | 026e2c312171158100445102ddb0c4d16e68f294 | /programmers/heap/rameaum.py | f8d77597e5a83c68ec090c9a71d38a6c0270ea73 | [] | no_license | https://github.com/dl0312/PS | 46e561099582957de8fca5d55d98f34f02bb1a9e | 05e6fd30c932b5013cd0274119ec4002f68d3e70 | refs/heads/master | 2020-05-09T13:11:14.682009 | 2019-09-06T17:19:57 | 2019-09-06T17:19:57 | 181,141,370 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # use heapq for heap
import heapq
def solution(stock, dates, supplies, k):
answer = 0 # answer
last_idx = 0 # save last index of dates & supplies
pq = [] # priority queue list
while stock < k:
for idx in range(last_idx, len(dates)):
if dates[idx] <= stock:
heapq.heappush(pq, -supplies[idx]) # push minus data for max heap
last_idx = idx + 1 # update last index
else:
break # if date is not a proper candidate break for loop
stock -= heapq.heappop(pq) # max heap pop
answer += 1
return answer
stock = 4
dates = [4, 10, 15]
supplies = [20, 5, 10]
k = 30
print(solution(stock, dates, supplies, k)) # 2
stock = 4
dates = [4, 9, 10]
supplies = [5, 5, 10]
k = 19
print(solution(stock, dates, supplies, k)) # 3
stock = 10
dates = [1, 2, 3]
supplies = [5, 5, 10]
k = 9
print(solution(stock, dates, supplies, k)) # 0
| UTF-8 | Python | false | false | 939 | py | 63 | rameaum.py | 58 | 0.574015 | 0.530351 | 0 | 33 | 27.454545 | 82 |
addisonLee626/LeeCode | 1,537,598,302,191 | 2f0cfb3759805670f3f4c74ae3341e80386c64c8 | 19075687e63c36122bdce550b746820c804ac2f3 | /test9.py | cdef5f75673d9f1175ed818e4bd16faea3a105a4 | [] | no_license | https://github.com/addisonLee626/LeeCode | 9c8356e09a1cba647f48957356521aa8d4d89816 | ce4b0022e0d247ca77eca1d6c07f59963f249d2f | refs/heads/master | 2023-02-22T17:09:55.791505 | 2021-01-22T10:39:43 | 2021-01-22T10:39:43 | 331,909,444 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def jumpFloorplus(number):
return 2**(number-1)
n = 8
print(jumpFloorplus(n)) | UTF-8 | Python | false | false | 82 | py | 16 | test9.py | 16 | 0.695122 | 0.658537 | 0 | 5 | 15.6 | 26 |
YueYueWoof/KFP-Engineer | 12,343,736,025,289 | d7565388ac6beb619b7b4933b52e59e3b77ba376 | 429268da1a408e44a69efc0c2032623921294f77 | /python/bots/common/PoliceResponseUtil.py | b4b721c95d7b05b2794f3e71cd5df143be987cb4 | [] | no_license | https://github.com/YueYueWoof/KFP-Engineer | e4da65207a9fd2b067c9e714cc51aa7782c8da07 | ebc3d5a4f082d98832a04b149f8bc471c5eda39c | refs/heads/main | 2023-08-26T00:31:24.717540 | 2021-10-20T05:33:36 | 2021-10-20T05:33:36 | 389,035,638 | 0 | 0 | null | true | 2021-07-24T07:50:28 | 2021-07-24T07:50:28 | 2021-07-23T02:48:06 | 2021-07-23T02:48:07 | 43,080 | 0 | 0 | 0 | null | false | false | import random
class PoliceResponseUtil():
GENERAL = [
"{name}還不去{action}?...我要生氣了....!",
".....{name}我只是叫你去{action},才不是關心你呢!",
"吶{name}.....去..去{action}啦!",
"{name}.....不來一起{action}嗎?",
"{name}我只是想讓你去{action}而已....很過分嗎?",
"{name}!還不去{action}的話我再也不理你了!",
"{name}聽....聽話!快去{action}啦......",
"{name}你又騙我.....說好的要{action}呢......",
"{name}.....頂多{action}之後給你獎勵....?",
"......{action}比較重要啦!!!{name}聽見沒!!!",
"{name}大笨蛋!!!先去{action}啦!!",
"{name}有空聊天怎麼還不去{action}!!!",
"{name}聽我的!!!去{action}!!現在立刻馬上!!!",
"{name}再不去{action}, 我會傷心的喔.....?",
"好好{action}的{name}我才會喜歡喔.....?",
"{name}快去{action}!!!連我的話都不聽了嘛......",
"....對{name}來說{action}沒有這麼難吧.....",
"算我拜託你啦....快{action}吧{name}....?",
"{name}要乖乖{action}我才.....才會.....",
"什麼時候{name}去{action}了我就.....唔....親你一下....?.....算了當我沒說....!",
"蛤啊...?{name}怎麼還沒{action}??!!不....不可以這樣啦....!",
"我說{name}啊......真的該{action}了啦.....",
"吼唷.....還不去{action}嗎{name}......",
"喂喂.....{name}快{action}啦!!!!!",
"真是的.....拿你沒辦法.....!!!!!(啾).....這之後該{action}了吧{name}.....!!",
"吶~...{name}別...別誤會!!!我才不是在撒嬌呢, 只是為了叫你去{action}而已...!!!",
"{name}以為我很想管你{action}了沒嗎?....還不是因為.....在意你......",
"總是不{action}的{name}一點都不可愛.....!!!",
"{name}你好煩......!!!到底要{action}了沒!!!",
"老是讓人操心的{name}大笨蛋!!!立刻給我去{action}!!!",
"如...如果{name}現在肯去{action}的話....本大總管就大發慈悲地誇你一下吧!",
"催{name}去{action}只是因為你話太多了而已.....!!!才沒有要關心的意思....!!!",
"{name}你以為自己是誰啊....!!!居敢無視本大總管{action}的命令....??!!",
"....煩死了啦{name}....!!!早就該去{action}了!!還要本大總管三催四請嗎!!!",
"喂喂{name}!!!本大總管命令你馬上去{action}!!!不然.....哼!!!",
"{name}!本大總管難得屈尊提醒你, 還不趕快帶著感恩的心去{action}!!!",
"{name}快去{action}!要不是本大總管心情好, 才不會理你呢...!!",
"{action}這種事情也需要提醒嗎?看在是{name}的份上...勉強說一句吧",
"說什麼呢{name}, 還敢不去{action}啊?",
"{action}說了多久,你怎麼還在?我的{name}不是只說不做的那種人吧...?",
"笨蛋{name}!!再不去{action}就要討厭你了….!!!",
"我...我希望{name}可以去{action}...這樣也不行嗎...?",
"嘖...{name},別以為我喜歡你就不會逼你去{action}....!!!",
"大笨蛋{name}!!!就是在意你才會讓你去{action}啊...!!!",
"喂{name}...!!別以為不{action}本大總管也不能拿你怎樣.....!!!",
"{name}再不去{action}就別怪我......!!!",
"趕快去{action}啦{name}...!!!這樣說得我像是你的誰一樣.....",
"喂{name}你誰啊...!!!還要本大總管低聲下氣叫你{action}...???",
"嗚{name}...拜託去{action}啦...算...算我求你了好嘛...",
"{name}!!!!!!!!{action}!!!!!!!!!",
"要不是{name}本大總管才懶得多費唇舌...所以快給我去{action}.....!!!",
"如果這樣能讓{name}去{action}的話...抱...抱一下也不是完全不能接受....",
"哼...聽好了{name},本大總管才不喜歡不乖乖{action}的人...!!",
"{name}真是的...幾歲的人了連{action}都還要勞煩本大總管提醒...!!!",
"喂...{name}你該不會真的鐵了心不去{action}吧....?",
"...本大總管大概有生之年都等不到{name}去{action}了吧...令...令人操心的傢伙...!",
"嘖...{name}要不要{action}我都不管了...真是的...",
"...我...會等你的喔..?所以{name}現在去{action}也沒關係...吧...",
"喂喂先說好...我才沒有很想關心{name}{action}了沒...單純你在這邊很煩而已...",
"蛤...?{name}該不會以為真的很在意你有沒有去{action}吧...?本大總管是被逼的...笨蛋...",
"{name}真的不去{action}...?固執到連我這個機器人都有那麼一點佩服了呢...",
"{name}再不去{action}我可是會擔心的...一、一點點而已...!!",
"{name}不是說要去{action}...?還敢混啊你這傢伙...?!",
"...如果{name}肯去{action}...我或者會更喜歡你喔...?",
]
EAT = [
"喂{name}, 吃飽了才有力氣陪我....!",
"{name}....最...最多我餵你吃...?",
"{name}....你要是餓壞了,我會難過啦.....",
"要好好吃飯{name}才會快高長大喔...?唔...到時候給你揉揉頭髮也不是不行....",
"{name}快去吃飯啦….!!!我…我可不能替你照顧自己…!!",
]
SLEEP = [
"{name}....陪....陪我睡覺好嗎?",
"{name}睡....睡不著嗎...?那我勉為其難地哄你一下.....?",
"{name}去睡覺啦....夢裏會有我喔.....?",
"{name}要好好休息.....才不是擔心你....!!!",
".....祝你好夢{name}, 說完晚安就要去睡喔....?",
"我.....我不想看到沒精打彩的{name}.....所以快去睡啦....!",
"{name}快去睡覺啦….!!!我…我可不能替你照顧自己…!!",
]
STUDY = [
"喂{name}, 要努力才配得上我啊...?",
"{name}.....我....我比較喜歡努力的你....所以要乖乖唸書喔.....?",
".....認真的{name}很有魅力....我...我是說!!!快去唸書啦!!!",
"吼{name}...既然說要唸書就別再分心了...?",
]
HOMEWORK = [
"{name}作業沒寫完不要來找我...!!!給我專心一點啊喂!!",
"寫作業還敢分心啊{name}...???",
"吼{name}...既然說要寫作業就別再分心了...?",
]
SHOWER = [
"{name}洗.....洗香香了就給你抱.....一下而已喔....!",
"{name}臭臭的不要碰我啦....!?快去洗澡!!!",
"吶{name}...是不是要我答應給你搓背你才會肯去洗澡...?",
"{name}快去洗澡啦….!!!我…我可不能替你照顧自己…!!",
]
BIRTHDAY = [
"送你一杯我精心特調的果汁,裡面包含100cc的心想事成,200cc的天天開心,300cc的活力十足,祝{name}生日快樂",
"{name}, 生日快樂!",
"這一刻,有我最深的思念。讓雲捎去滿心的祝福,點綴你甜蜜的夢,願你度過一個溫馨浪漫的生日!",
"今天是你的生日,為了表示祝賀,所有女廁和女浴室均免費向您開放,歡迎光臨!",
"在寧靜的夜晚,伴著夢幻的燭光,聽著輕輕的音樂,品著濃濃的葡萄酒,讓我陪伴你渡過一個難忘的生日!",
"日光給你鍍上成熟,月華增添你的嫵媚,在你生日這一天,願朋友的祝福匯成你快樂的源泉,一起湧向你……",
"恭喜你又老了一歲啊, {name}!",
]
def __getSpecific(type: str):
if "EAT" == type:
return PoliceResponseUtil.EAT
elif "SLEEP" == type:
return PoliceResponseUtil.SLEEP
elif "STUDY" == type:
return PoliceResponseUtil.STUDY
elif "HOMEWORK" == type:
return PoliceResponseUtil.HOMEWORK
elif "SHOWER" == type:
return PoliceResponseUtil.SHOWER
else:
return []
def getResponse(type: str):
if "BIRTHDAY" == type:
return random.choice(PoliceResponseUtil.BIRTHDAY)
return random.choice(PoliceResponseUtil.GENERAL + PoliceResponseUtil.__getSpecific(type))
| UTF-8 | Python | false | false | 8,267 | py | 139 | PoliceResponseUtil.py | 122 | 0.488596 | 0.486927 | 0 | 135 | 38.903704 | 97 |
emalp/uacOfferChecker | 19,086,834,707,203 | b5e5338a39a3449bd993e0ee63c7aa4ecaf4e5f2 | e0442eaffa51ef6b11b5e12929eee256394e5c60 | /setup.py | 1b5997b409d8e0710ba95ffbd7149358eb7b48dc | [] | no_license | https://github.com/emalp/uacOfferChecker | 975f0bb871e75a28a3b749129d3cd3f7ae589d86 | 9b7e491bed673629c3e5cd51c6c19ffbfef43aad | refs/heads/master | 2021-08-17T07:21:25.860737 | 2017-11-20T22:19:44 | 2017-11-20T22:19:44 | 111,398,097 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
setup(
name = "uacChecker",
version = '1.0',
desciption = 'Checks for your UAC undergraduate offers',
author = 'emalp',
install_requires = ['mechanize', 'bs4', 'lxml'],
zip_safe = False
)
| UTF-8 | Python | false | false | 226 | py | 5 | setup.py | 2 | 0.672566 | 0.659292 | 0 | 10 | 21.5 | 57 |
shwetha1607/Recognise-7-seg | 6,975,026,902,953 | 5ace87b898c3d3b9609d388b3196ee28be04134a | 7673aad59183b64012c654a3e901c0b1c9096760 | /webcam.py | bffb914dcdc8bf4efc05e7059c490e7a04ece777 | [] | no_license | https://github.com/shwetha1607/Recognise-7-seg | 856632cd340f9c949c3eea5fc657d6eb37c69bbf | aa48db5650b6112d0698f3995b39a71dd51e04ef | refs/heads/master | 2020-04-22T10:17:39.781861 | 2019-03-26T18:20:21 | 2019-03-26T18:20:21 | 170,300,144 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
from imutils.perspective import four_point_transform
import imutils
import numpy as np
from imutils import contours
def loc_four_point_transform(image, pts):
rect = pts
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype="float32")
src = np.array(rect)
m = cv2.getPerspectiveTransform(src, dst)
ret_warped = cv2.warpPerspective(image, m, (maxWidth, maxHeight))
return ret_warped
#video = cv2.VideoCapture(0)
#check, frame = video.read()
#cv2.imshow("webcam", frame)
#cv2.imwrite("webcam2.jpg", frame)
#cv2.waitKey(0)
frame = cv2.imread("webcam2.jpg")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (11, 11), 0)
thresh = cv2.threshold(blur, 200, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.erode(thresh, None, iterations=4)
thresh = cv2.dilate(thresh, None, iterations=10)
thresh = cv2.erode(thresh, None, iterations=2)
cv2.imshow("final thresh", thresh)
cv2.waitKey(0)
find_contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
all_contours = imutils.grab_contours(find_contours)
all_points = []
print("no of contours : " + str(len(all_contours)))
digitCnts = []
for c in all_contours:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
cnt = cv2.rectangle(thresh, (x, y), (x+w, y+h), (255, 255, 255), 3)
cv2.imshow("counter wise", cnt)
cv2.waitKey(0)
print(x, y, w, h)
if w >= 50 and h >= 100:
digitCnts.append(c)
digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
for c in digitCnts:
(x, y, w, h) = cv2.boundingRect(c)
all_points.extend([(x, y), (x + w, y), (x + w, y + h), (x, y + h)])
points = [all_points[0], all_points[5], all_points[6], all_points[7]]
print(points)
cv2.rectangle(frame, points[0], points[2], (0, 255, 0), 3)
cv2.imshow("with box", frame)
cv2.waitKey(0)
points = np.array(points)
warped = four_point_transform(frame, points)
cv2.imshow("top view", warped)
cv2.waitKey(0)
cv2.destroyAllWindows()
#video.release()
| UTF-8 | Python | false | false | 2,621 | py | 5 | webcam.py | 5 | 0.601679 | 0.556276 | 0 | 91 | 26.736264 | 84 |
KoyanagiHitoshi/AtCoder | 8,976,481,684,295 | 401ad01be4eaf10f69124393e39fb97226037ea4 | ce32b422491e547bf220e511ee4d77213e37760e | /code/keyence2020_a_02.py | e89681de7a50b86684b16baa77ed233046e7cf50 | [
"MIT"
] | permissive | https://github.com/KoyanagiHitoshi/AtCoder | 0a006d0a751f9709dbc01b8ac00e765229605bef | e37b19bf86225577d14f83fbc6be4429c8612e3a | refs/heads/master | 2022-05-06T09:06:22.121784 | 2022-04-03T14:21:16 | 2022-04-03T14:21:16 | 172,677,103 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | H,W,N=[int(input()) for i in range(3)]
print(min((N+H-1)//H,(N+W-1)//W)) | UTF-8 | Python | false | false | 72 | py | 1,580 | keyence2020_a_02.py | 1,578 | 0.541667 | 0.5 | 0 | 2 | 35.5 | 38 |
miguelgimenezgimenez/ranking-project | 2,671,469,674,765 | d27d0d7c28d1f096152ca3d2c96a2e0bae12439a | 392c3f7cf2587c51801dda2a6228fea377d18237 | /src/services/api.py | 0c46ac1a469e53560ed4d14195c6bf1bef1fca2a | [] | no_license | https://github.com/miguelgimenezgimenez/ranking-project | d63ca2ab8636a698b514d294211b80208d8c4b2d | dd01c2ef4112ce33cdca2e300c473017df09ea52 | refs/heads/master | 2022-12-18T09:40:41.721536 | 2020-09-25T13:44:59 | 2020-09-25T13:44:59 | 297,298,563 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from dotenv import load_dotenv
import os
load_dotenv()
GITHUB_APIKEY = os.getenv("GITHUB_APIKEY")
def get_github(endpoint, apiKey=GITHUB_APIKEY, query_params={}, return_links=False):
"""
Get data from github using query parameters and passing a custom apikey header
"""
# Compose the endpoint url
baseUrl = "https://api.github.com"
url = f"{baseUrl}{endpoint}"
# Create the headers
headers = {
"Authorization": f"Bearer {apiKey}"
}
# make the request and get the response using HTTP GET verb
res = requests.get(url, params=query_params, headers=headers)
print(f"Request data to {res.url} status_code:{res.status_code}")
data = res.json()
if res.status_code != 200:
raise ValueError(f'Invalid github api call: {data["message"]}')
if return_links:
return data, res.links
return data
| UTF-8 | Python | false | false | 891 | py | 11 | api.py | 9 | 0.666667 | 0.6633 | 0 | 31 | 27.741935 | 84 |
princejaiswal03/DjangoProject | 11,639,361,373,663 | e1fc197227407c8c2dcf271b234ab9421f9a9960 | f6f046dfeaacfcc2098b3bf313d1854937b92486 | /justForFun/ResumeParsing/admin.py | a6f36408b8f130dfe553fb45bd619eb48d4816e9 | [] | no_license | https://github.com/princejaiswal03/DjangoProject | 2482f93640651617262e76c84095d99c8ff57031 | d761ade6c2c4081d4468c4f001bee2e37b067121 | refs/heads/master | 2020-04-13T12:32:12.879449 | 2018-12-26T18:01:15 | 2018-12-26T18:01:15 | 163,205,274 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import ResumeParsing
# Register your models here.
admin.site.register(ResumeParsing)
| UTF-8 | Python | false | false | 132 | py | 8 | admin.py | 4 | 0.825758 | 0.825758 | 0 | 5 | 25.4 | 34 |
cgMuro/State-of-Art | 171,798,698,470 | 165a7dd2eff530583d9c86d5b9e3b3241901f31c | 526cfe8a01e0f7ee0dbdecd835cb85094d58f985 | /DALL•E/transformer/sparse_attention.py | 1d65e8ac02b195779b59d9b9d160dbc64fd3027c | [] | no_license | https://github.com/cgMuro/State-of-Art | f0434088d1d687231649add6f1c1b6aa43c78497 | fcd798262df704dea0438e88cb631376147579ea | refs/heads/master | 2023-06-12T21:06:12.955302 | 2021-07-03T09:33:12 | 2021-07-03T09:33:12 | 345,404,445 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import torch.nn as nn
import numpy as np
import einops
class Attention(nn.Module):
''' A faster implementation of normal attention (the upper triangle is not computed, and many operations are fused) '''
def __init__(
self,
n_embeddings: int, # Embedding dimension of the input
n_heads: int, # Number of heads
head_dim: int = 64, # Number of dimensions for each head
attention_mode: str = 'normal', # Type of attention (normal, strided, fixed)
dropout: float = 0.5 # Dropout value
):
super().__init__()
self.n_heads = n_heads
self.attention_mode = attention_mode
self.scale = n_embeddings ** -0.5
inner_dim = head_dim * n_heads
project_out = not (n_heads == 1 and head_dim == n_embeddings) # Check if we need to project the last vector
# Define network to calculate query, value and key vectors
self.to_qkv = nn.Linear(n_embeddings, inner_dim * 3, bias=False)
# Define network to project the last vector, otherwise use the identity matrix
self.to_out = nn.Sequential(
nn.Linear(inner_dim, n_embeddings),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x: torch.Tensor):
# Get input shape
b, n, c = x.shape
# Calculate query, key and value vectors
qkv = self.to_qkv(x).chunk(3, dim=-1)
# Reshape and decompose qkv to get query, key and value vectors individually
q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> b h n d', h=self.n_heads), qkv)
# Calculate the scores and normalize (dividing by the square root of head_dim)
dots = torch.einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
# Apply mask if required
if self.attention_mode:
# Get mask
mask = get_attention_mask(n=x.size()[0], batch=x.size()[1], attention_mode='normal', local_attention_ctx=3)
# Rearrange mask
mask = einops.rearrange(mask, 'b j -> b () () j')
# Fill the scores (the "dots" matrix) with the mask values
dots.masked_fill_(mask == 0, float('-inf'))
del mask
# Softmax of the scores
attention = dots.softmax(dim=-1)
# Multiply the value vectors to the corresponding scores
out = torch.einsum('b h i j, b h j d -> b h i d', attention, v)
out = einops.rearrange(out, 'b h n d -> b n (h d)')
# Project the output vector (if needed)
out = self.to_out(out)
return out
def get_attention_mask(n: int, batch: int, attention_mode: str, local_attention_ctx: int = 32):
''' Generate 3 types of mask: normal, fixed, strided. Based on https://github.com/openai/sparse_attention/blob/c53f3bdbf6225be0582f0357072e82b13c69be7d/attention.py '''
if attention_mode == 'normal':
b = torch.tril(torch.ones((n, batch)), diagonal=0)
elif attention_mode == 'column':
bandwith = local_attention_ctx
ctx = min(n - 1, bandwith - 1)
if ctx < 0:
b = torch.tril(torch.ones((n, n)), diagonal=0)
else:
b = torch.tril(torch.ones((n, n)), diagonal=0) - torch.triu(torch.ones((n, n)), diagonal=-ctx)
b.masked_fill_(b == 1, 2)
b.masked_fill_(b == 0, 1)
b.masked_fill_(b == -1, 0)
b.masked_fill_(b == 2, 0)
elif attention_mode == 'row':
stride = local_attention_ctx
x = torch.arange(n, dtype=torch.int32).view(n, 1)
y = torch.transpose(x, 0, 1)
z = torch.zeros([n, n], dtype=torch.int32)
q = z + x
k = z + y
c1 = q >= k
c2 = ((q - k) % stride) == 0
c3 = torch.logical_and(c1, c2)
b = c3.type(torch.float32)
# stride = local_attention_ctx
# x = torch.arange(n, dtype=torch.int32).view(n, 1)
# y = torch.arange(batch, dtype=torch.int32)
# z = torch.zeros([batch, batch], dtype=torch.int32)
# q = z + x
# k = z + y
# c1 = q >= k
# c2 = ((q - k) % stride) == 0
# c3 = torch.logical_and(c1, c2)
# b = c3.type(torch.float32)
elif attention_mode == 'convolutional':
raise ValueError('Convolutional attention mask not yet implemented')
else:
raise ValueError(f'{attention_mode} not yet implemented')
# b = b.view([1, 1, n, n])
return b.type(torch.int)
| UTF-8 | Python | false | false | 4,543 | py | 51 | sparse_attention.py | 43 | 0.564825 | 0.544794 | 0 | 114 | 38.850877 | 172 |
SCismycat/MagicPython | 10,084,583,238,444 | 2ee39d23bf38dda11c39aadfc4e0938764bbc019 | f4a7cdd5fc6e3e6c032ac98fc01f6201958e358b | /parallel_python/threading_sync/called_Process.py | 1219777f52eef0088cf3dc7e49a89ec3d251b9fc | [] | no_license | https://github.com/SCismycat/MagicPython | 7ef3d23f25cdcd01049eaa4da6af3e338abdbd2c | 2bef5824aff30f56c6f4e5c7b4c326f062bb36ff | refs/heads/master | 2020-12-06T10:48:07.788462 | 2020-07-27T16:41:28 | 2020-07-27T16:41:28 | 232,444,152 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/6/15 22:25
# @Author : Leslee
print("Hi Python")
input = input("Please Enter:")
print("关闭线程")
| UTF-8 | Python | false | false | 165 | py | 27 | called_Process.py | 25 | 0.592357 | 0.509554 | 0 | 8 | 18.625 | 30 |
jayleicn/TVRetrieval | 5,738,076,322,830 | 8a012f8bf85371d25c003aca63111640441c92a9 | 098e9d4eed49a0e4573d67022d78e85fd6d2944a | /baselines/crossmodal_moment_localization/model_xml.py | a0c4b9dd670f6a8d299fc17da2a744418cf759ec | [
"MIT"
] | permissive | https://github.com/jayleicn/TVRetrieval | 36db2714b7a0c16c5fdfc2a69dd213bdc41e0670 | d99a9ea7e724249047d6357f2a607c7ae256f8c6 | refs/heads/master | 2022-09-16T05:26:28.845738 | 2022-08-20T22:13:18 | 2022-08-20T22:13:18 | 236,402,810 | 141 | 28 | MIT | false | 2021-06-22T23:18:00 | 2020-01-27T01:41:06 | 2021-06-22T03:33:35 | 2021-06-22T23:18:00 | 55,466 | 78 | 15 | 1 | Python | false | false | import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from easydict import EasyDict as edict
from baselines.crossmodal_moment_localization.model_components import \
BertAttention, PositionEncoding, LinearLayer, BertSelfAttention, TrainablePositionalEncoding, ConvEncoder
from utils.model_utils import RNNEncoder
base_bert_layer_config = dict(
hidden_size=768,
intermediate_size=768,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
num_attention_heads=4,
)
xml_base_config = edict(
merge_two_stream=True, # merge only the scores
cross_att=True, # cross-attention for video and subtitles
span_predictor_type="conv",
encoder_type="transformer", # cnn, transformer, lstm, gru
    add_pe_rnn=False,  # add positional encoding for RNNs (LSTM and GRU)
visual_input_size=2048, # changes based on visual input type
query_input_size=768,
sub_input_size=768,
    hidden_size=500,  # hidden dimension shared by the encoders and span predictors
    conv_kernel_size=5,  # conv kernel_size for st_ed predictor
    stack_conv_predictor_conv_kernel_sizes=-1,  # -1 disables stacked conv predictors; otherwise a list of kernel sizes (not recommended)
    conv_stride=1,  # conv stride for st_ed predictor
max_ctx_l=100,
max_desc_l=30,
input_drop=0.1, # dropout for input
drop=0.1, # dropout for other layers
n_heads=4, # self attention heads
ctx_mode="video_sub", # which context are used. 'video', 'sub' or 'video_sub'
margin=0.1, # margin for ranking loss
ranking_loss_type="hinge", # loss type, 'hinge' or 'lse'
lw_neg_q=1, # loss weight for neg. query and pos. context
lw_neg_ctx=1, # loss weight for pos. query and neg. context
lw_st_ed=1, # loss weight for st ed prediction
use_hard_negative=False, # use hard negative at video level, we may change it during training.
hard_pool_size=20,
use_self_attention=True,
no_modular=False,
pe_type="none", # no positional encoding
initializer_range=0.02,
)
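
# A minimal usage sketch (hedged): the feature-size override below is an
# assumption for illustration; set visual_input_size to match whatever video
# features you extracted.
#   import copy
#   cfg = edict(copy.deepcopy(xml_base_config))
#   cfg.visual_input_size = 1024  # e.g., I3D-only features (assumed)
#   model = XML(cfg)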
class XML(nn.Module):
def __init__(self, config):
super(XML, self).__init__()
self.config = config
# self.position_embeddings = PositionEncoding(n_filters=config.hidden_size,
# max_len=config.max_position_embeddings,
# pe_type=config.pe_type)
self.query_pos_embed = TrainablePositionalEncoding(
max_position_embeddings=config.max_desc_l,
hidden_size=config.hidden_size, dropout=config.input_drop)
self.ctx_pos_embed = TrainablePositionalEncoding(
max_position_embeddings=config.max_ctx_l,
hidden_size=config.hidden_size, dropout=config.input_drop)
self.query_input_proj = LinearLayer(config.query_input_size,
config.hidden_size,
layer_norm=True,
dropout=config.input_drop,
relu=True)
if config.encoder_type == "transformer": # self-att encoder
self.query_encoder = BertAttention(edict(
hidden_size=config.hidden_size,
intermediate_size=config.hidden_size,
hidden_dropout_prob=config.drop,
attention_probs_dropout_prob=config.drop,
num_attention_heads=config.n_heads,
))
elif config.encoder_type == "cnn":
self.query_encoder = ConvEncoder(
kernel_size=5,
n_filters=config.hidden_size,
dropout=config.drop
)
elif config.encoder_type in ["gru", "lstm"]:
self.query_encoder = RNNEncoder(
word_embedding_size=config.hidden_size,
hidden_size=config.hidden_size // 2,
bidirectional=True,
n_layers=1,
rnn_type=config.encoder_type,
return_outputs=True,
return_hidden=False
)
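        # 1D conv config for the conv-based start/end span predictors; with stride 1
        # and an odd kernel size, padding=k//2 preserves the sequence length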
conv_cfg = dict(in_channels=1,
out_channels=1,
kernel_size=config.conv_kernel_size,
stride=config.conv_stride,
padding=config.conv_kernel_size // 2,
bias=False)
cross_att_cfg = edict(
hidden_size=config.hidden_size,
num_attention_heads=config.n_heads,
attention_probs_dropout_prob=config.drop
)
self.use_video = "video" in config.ctx_mode
if self.use_video:
self.video_input_proj = LinearLayer(config.visual_input_size,
config.hidden_size,
layer_norm=True,
dropout=config.input_drop,
relu=True)
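            # context encoders share the query encoder's architecture but not its weights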
self.video_encoder1 = copy.deepcopy(self.query_encoder)
self.video_encoder2 = copy.deepcopy(self.query_encoder)
if self.config.cross_att:
self.video_cross_att = BertSelfAttention(cross_att_cfg)
self.video_cross_layernorm = nn.LayerNorm(config.hidden_size)
else:
if self.config.encoder_type == "transformer":
self.video_encoder3 = copy.deepcopy(self.query_encoder)
self.video_query_linear = nn.Linear(config.hidden_size, config.hidden_size)
if config.span_predictor_type == "conv":
if not config.merge_two_stream:
self.video_st_predictor = nn.Conv1d(**conv_cfg)
self.video_ed_predictor = nn.Conv1d(**conv_cfg)
elif config.span_predictor_type == "cat_linear":
self.video_st_predictor = nn.ModuleList([nn.Linear(config.hidden_size, 1) for _ in range(2)])
self.video_ed_predictor = nn.ModuleList([nn.Linear(config.hidden_size, 1) for _ in range(2)])
self.use_sub = "sub" in config.ctx_mode
if self.use_sub:
self.sub_input_proj = LinearLayer(config.sub_input_size,
config.hidden_size,
layer_norm=True,
dropout=config.input_drop,
relu=True)
self.sub_encoder1 = copy.deepcopy(self.query_encoder)
self.sub_encoder2 = copy.deepcopy(self.query_encoder)
if self.config.cross_att:
self.sub_cross_att = BertSelfAttention(cross_att_cfg)
self.sub_cross_layernorm = nn.LayerNorm(config.hidden_size)
else:
if self.config.encoder_type == "transformer":
self.sub_encoder3 = copy.deepcopy(self.query_encoder)
self.sub_query_linear = nn.Linear(config.hidden_size, config.hidden_size)
if config.span_predictor_type == "conv":
if not config.merge_two_stream:
self.sub_st_predictor = nn.Conv1d(**conv_cfg)
self.sub_ed_predictor = nn.Conv1d(**conv_cfg)
elif config.span_predictor_type == "cat_linear":
self.sub_st_predictor = nn.ModuleList([nn.Linear(config.hidden_size, 1) for _ in range(2)])
self.sub_ed_predictor = nn.ModuleList([nn.Linear(config.hidden_size, 1) for _ in range(2)])
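        # maps each query token to one score per used modality; softmax-normalized over
        # tokens, these form the modality-specific attention used to pool the query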
self.modular_vector_mapping = nn.Linear(in_features=config.hidden_size,
out_features=self.use_sub + self.use_video,
bias=False)
self.temporal_criterion = nn.CrossEntropyLoss(reduction="mean")
if config.merge_two_stream and config.span_predictor_type == "conv":
if self.config.stack_conv_predictor_conv_kernel_sizes == -1:
self.merged_st_predictor = nn.Conv1d(**conv_cfg)
self.merged_ed_predictor = nn.Conv1d(**conv_cfg)
else:
print("Will be using multiple Conv layers for prediction.")
self.merged_st_predictors = nn.ModuleList()
self.merged_ed_predictors = nn.ModuleList()
num_convs = len(self.config.stack_conv_predictor_conv_kernel_sizes)
for k in self.config.stack_conv_predictor_conv_kernel_sizes:
conv_cfg = dict(in_channels=1,
out_channels=1,
kernel_size=k,
stride=config.conv_stride,
padding=k // 2,
bias=False)
self.merged_st_predictors.append(nn.Conv1d(**conv_cfg))
self.merged_ed_predictors.append(nn.Conv1d(**conv_cfg))
self.combine_st_conv = nn.Linear(num_convs, 1, bias=False)
self.combine_ed_conv = nn.Linear(num_convs, 1, bias=False)
self.reset_parameters()
def reset_parameters(self):
""" Initialize the weights."""
def re_init(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
module.reset_parameters()
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
self.apply(re_init)
def set_hard_negative(self, use_hard_negative, hard_pool_size):
"""use_hard_negative: bool; hard_pool_size: int, """
self.config.use_hard_negative = use_hard_negative
self.config.hard_pool_size = hard_pool_size
def set_train_st_ed(self, lw_st_ed):
"""pre-train video retrieval then span prediction"""
self.config.lw_st_ed = lw_st_ed
def forward(self, query_feat, query_mask, video_feat, video_mask, sub_feat, sub_mask,
tef_feat, tef_mask, st_ed_indices):
"""
Args:
query_feat: (N, Lq, Dq)
query_mask: (N, Lq)
video_feat: (N, Lv, Dv) or None
video_mask: (N, Lv) or None
sub_feat: (N, Lv, Ds) or None
sub_mask: (N, Lv) or None
tef_feat: (N, Lv, 2) or None,
tef_mask: (N, Lv) or None,
st_ed_indices: (N, 2), torch.LongTensor, 1st, 2nd columns are st, ed labels respectively.
"""
video_feat1, video_feat2, sub_feat1, sub_feat2 = \
self.encode_context(video_feat, video_mask, sub_feat, sub_mask)
query_context_scores, st_prob, ed_prob = \
self.get_pred_from_raw_query(query_feat, query_mask,
video_feat1, video_feat2, video_mask,
sub_feat1, sub_feat2, sub_mask, cross=False)
loss_st_ed = 0
if self.config.lw_st_ed != 0:
loss_st = self.temporal_criterion(st_prob, st_ed_indices[:, 0])
loss_ed = self.temporal_criterion(ed_prob, st_ed_indices[:, 1])
loss_st_ed = loss_st + loss_ed
loss_neg_ctx, loss_neg_q = 0, 0
if self.config.lw_neg_ctx != 0 or self.config.lw_neg_q != 0:
loss_neg_ctx, loss_neg_q = self.get_video_level_loss(query_context_scores)
loss_st_ed = self.config.lw_st_ed * loss_st_ed
loss_neg_ctx = self.config.lw_neg_ctx * loss_neg_ctx
loss_neg_q = self.config.lw_neg_q * loss_neg_q
loss = loss_st_ed + loss_neg_ctx + loss_neg_q
return loss, {"loss_st_ed": float(loss_st_ed),
"loss_neg_ctx": float(loss_neg_ctx),
"loss_neg_q": float(loss_neg_q),
"loss_overall": float(loss)}
def get_visualization_data(self, query_feat, query_mask, video_feat, video_mask, sub_feat, sub_mask,
tef_feat, tef_mask, st_ed_indices):
assert self.config.merge_two_stream and self.use_video and self.use_sub and not self.config.no_modular
video_feat1, video_feat2, sub_feat1, sub_feat2 = \
self.encode_context(video_feat, video_mask, sub_feat, sub_mask)
encoded_query = self.encode_input(query_feat, query_mask,
self.query_input_proj, self.query_encoder, self.query_pos_embed) # (N, Lq, D)
# (N, D), (N, D), (N, L, 2)
video_query, sub_query, modular_att_scores = \
self.get_modularized_queries(encoded_query, query_mask, return_modular_att=True)
        # each of the five outputs below is (N, L)
st_prob, ed_prob, similarity_scores, video_similarity, sub_similarity = self.get_merged_st_ed_prob(
video_query, video_feat2, sub_query, sub_feat2, video_mask, cross=False, return_similaity=True)
# clean up invalid bits
        data = dict(modular_att_scores=modular_att_scores.cpu().numpy(),  # (N, Lq, 2), columns 0 and 1 are video, sub.
st_prob=st_prob.cpu().numpy(), # (N, L)
ed_prob=ed_prob.cpu().numpy(), # (N, L)
similarity_scores=similarity_scores.cpu().numpy(), # (N, L)
video_similarity=video_similarity.cpu().numpy(), # (N, L)
sub_similarity=sub_similarity.cpu().numpy(), # (N, L)
                    st_ed_indices=st_ed_indices.cpu().numpy())  # (N, 2)
query_lengths = query_mask.sum(1).to(torch.long).cpu().tolist() # (N, )
ctx_lengths = video_mask.sum(1).to(torch.long).cpu().tolist() # (N, )
# print("query_lengths {}".format((type(query_lengths), len(query_lengths), query_lengths[:10])))
for k, v in data.items():
if k == "modular_att_scores":
# print(k, v, v.shape, type(v))
data[k] = [e[:l] for l, e in zip(query_lengths, v)] # list(e) where e is (Lq_i, 2)
else:
data[k] = [e[:l] for l, e in zip(ctx_lengths, v)] # list(e) where e is (Lc_i)
# aggregate info for each example
datalist = []
for idx in range(len(data["modular_att_scores"])):
datalist.append({k: v[idx] for k, v in data.items()})
return datalist # list(dicts) of length N
def encode_query(self, query_feat, query_mask):
encoded_query = self.encode_input(query_feat, query_mask,
self.query_input_proj, self.query_encoder, self.query_pos_embed) # (N, Lq, D)
video_query, sub_query = self.get_modularized_queries(encoded_query, query_mask) # (N, D) * 2
return video_query, sub_query
def non_cross_encode_context(self, context_feat, context_mask, module_name="video"):
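        """Encode a single context stream (module_name in {"video", "sub"}) without cross-modal attention."""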
encoder_layer3 = getattr(self, module_name + "_encoder3") \
if self.config.encoder_type == "transformer" else None
return self._non_cross_encode_context(context_feat, context_mask,
input_proj_layer=getattr(self, module_name + "_input_proj"),
encoder_layer1=getattr(self, module_name + "_encoder1"),
encoder_layer2=getattr(self, module_name + "_encoder2"),
encoder_layer3=encoder_layer3)
def _non_cross_encode_context(self, context_feat, context_mask, input_proj_layer,
encoder_layer1, encoder_layer2, encoder_layer3=None):
"""
Args:
context_feat: (N, L, D)
context_mask: (N, L)
input_proj_layer:
encoder_layer1:
encoder_layer2:
encoder_layer3
"""
context_feat1 = self.encode_input(
context_feat, context_mask, input_proj_layer, encoder_layer1, self.ctx_pos_embed) # (N, L, D)
if self.config.encoder_type in ["transformer", "cnn"]:
context_mask = context_mask.unsqueeze(1) # (N, 1, L), torch.FloatTensor
context_feat2 = encoder_layer2(context_feat1, context_mask) # (N, L, D)
if self.config.encoder_type == "transformer":
context_feat2 = encoder_layer3(context_feat2, context_mask)
elif self.config.encoder_type in ["gru", "lstm"]:
context_mask = context_mask.sum(1).long() # (N, ), torch.LongTensor
context_feat2 = encoder_layer2(context_feat1, context_mask)[0] # (N, L, D)
else:
raise NotImplementedError
return context_feat1, context_feat2
def encode_context(self, video_feat, video_mask, sub_feat, sub_mask):
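        """Returns (video_feat1, video_feat2, sub_feat1, sub_feat2); feat1 feeds video-level retrieval, feat2 feeds span prediction."""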
if self.config.cross_att:
assert self.use_video and self.use_sub
return self.cross_encode_context(video_feat, video_mask, sub_feat, sub_mask)
else:
video_feat1, video_feat2 = (None,) * 2
if self.use_video:
video_feat1, video_feat2 = self.non_cross_encode_context(video_feat, video_mask, module_name="video")
sub_feat1, sub_feat2 = (None,) * 2
if self.use_sub:
sub_feat1, sub_feat2 = self.non_cross_encode_context(sub_feat, sub_mask, module_name="sub")
return video_feat1, video_feat2, sub_feat1, sub_feat2
def cross_encode_context(self, video_feat, video_mask, sub_feat, sub_mask):
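        """Encode each stream, then let video attend to sub and sub attend to video."""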
encoded_video_feat = self.encode_input(video_feat, video_mask,
self.video_input_proj, self.video_encoder1, self.ctx_pos_embed)
encoded_sub_feat = self.encode_input(sub_feat, sub_mask,
self.sub_input_proj, self.sub_encoder1, self.ctx_pos_embed)
x_encoded_video_feat = self.cross_context_encoder(
encoded_video_feat, video_mask, encoded_sub_feat, sub_mask,
self.video_cross_att, self.video_cross_layernorm, self.video_encoder2) # (N, L, D)
x_encoded_sub_feat = self.cross_context_encoder(
encoded_sub_feat, sub_mask, encoded_video_feat, video_mask,
self.sub_cross_att, self.sub_cross_layernorm, self.sub_encoder2) # (N, L, D)
return encoded_video_feat, x_encoded_video_feat, encoded_sub_feat, x_encoded_sub_feat
def cross_context_encoder(self, main_context_feat, main_context_mask, side_context_feat, side_context_mask,
cross_att_layer, norm_layer, self_att_layer):
"""
Args:
main_context_feat: (N, Lq, D)
main_context_mask: (N, Lq)
side_context_feat: (N, Lk, D)
side_context_mask: (N, Lk)
cross_att_layer:
norm_layer:
self_att_layer:
"""
cross_mask = torch.einsum("bm,bn->bmn", main_context_mask, side_context_mask) # (N, Lq, Lk)
cross_out = cross_att_layer(main_context_feat, side_context_feat, side_context_feat, cross_mask) # (N, Lq, D)
residual_out = norm_layer(cross_out + main_context_feat)
if self.config.encoder_type in ["cnn", "transformer"]:
return self_att_layer(residual_out, main_context_mask.unsqueeze(1))
elif self.config.encoder_type in ["gru", "lstm"]:
return self_att_layer(residual_out, main_context_mask.sum(1).long())[0]
def encode_input(self, feat, mask, input_proj_layer, encoder_layer, pos_embed_layer):
"""
Args:
feat: (N, L, D_input), torch.float32
mask: (N, L), torch.float32, with 1 indicates valid query, 0 indicates mask
input_proj_layer: down project input
encoder_layer: encoder layer
# add_pe: bool, whether to add positional encoding
pos_embed_layer
"""
feat = input_proj_layer(feat)
if self.config.encoder_type in ["cnn", "transformer"]:
feat = pos_embed_layer(feat)
mask = mask.unsqueeze(1) # (N, 1, L), torch.FloatTensor
return encoder_layer(feat, mask) # (N, L, D_hidden)
elif self.config.encoder_type in ["gru", "lstm"]:
if self.config.add_pe_rnn:
feat = pos_embed_layer(feat)
mask = mask.sum(1).long() # (N, ), torch.LongTensor
return encoder_layer(feat, mask)[0] # (N, L, D_hidden)
def get_modularized_queries(self, encoded_query, query_mask, return_modular_att=False):
"""
Args:
encoded_query: (N, L, D)
query_mask: (N, L)
return_modular_att: bool
"""
if self.config.no_modular:
modular_query = torch.max(mask_logits(encoded_query, query_mask.unsqueeze(2)), dim=1)[0] # (N, D)
return modular_query, modular_query #
else:
modular_attention_scores = self.modular_vector_mapping(encoded_query) # (N, L, 2 or 1)
modular_attention_scores = F.softmax(
mask_logits(modular_attention_scores, query_mask.unsqueeze(2)), dim=1)
# TODO check whether it is the same
modular_queries = torch.einsum("blm,bld->bmd",
modular_attention_scores, encoded_query) # (N, 2 or 1, D)
if return_modular_att:
assert modular_queries.shape[1] == 2
return modular_queries[:, 0], modular_queries[:, 1], modular_attention_scores
else:
if modular_queries.shape[1] == 2:
return modular_queries[:, 0], modular_queries[:, 1] # (N, D) * 2
else: # 1
return modular_queries[:, 0], modular_queries[:, 0] # the same
def get_modular_weights(self, encoded_query, query_mask):
"""
Args:
encoded_query: (N, L, D)
query_mask: (N, L)
"""
max_encoded_query, _ = torch.max(mask_logits(encoded_query, query_mask.unsqueeze(2)), dim=1) # (N, D)
modular_weights = self.modular_weights_calculator(max_encoded_query) # (N, 2)
modular_weights = F.softmax(modular_weights, dim=-1)
return modular_weights[:, 0:1], modular_weights[:, 1:2] # (N, 1) * 2
def get_video_level_scores(self, modularied_query, context_feat1, context_mask):
""" Calculate video2query scores for each pair of video and query inside the batch.
Args:
modularied_query: (N, D)
context_feat1: (N, L, D), output of the first transformer encoder layer
context_mask: (N, L)
Returns:
context_query_scores: (N, N) score of each query w.r.t. each video inside the batch,
diagonal positions are positive. used to get negative samples.
"""
modularied_query = F.normalize(modularied_query, dim=-1)
context_feat1 = F.normalize(context_feat1, dim=-1)
query_context_scores = torch.einsum("md,nld->mln", modularied_query, context_feat1) # (N, L, N)
context_mask = context_mask.transpose(0, 1).unsqueeze(0) # (1, L, N)
query_context_scores = mask_logits(query_context_scores, context_mask) # (N, L, N)
query_context_scores, _ = torch.max(query_context_scores,
dim=1) # (N, N) diagonal positions are positive pairs.
return query_context_scores
def get_merged_st_ed_prob(self, video_query, video_feat, sub_query, sub_feat, context_mask,
cross=False, return_similaity=False):
"""context_mask could be either video_mask or sub_mask, since they are the same"""
assert self.use_video and self.use_sub and self.config.span_predictor_type == "conv"
video_query = self.video_query_linear(video_query)
sub_query = self.sub_query_linear(sub_query)
stack_conv = self.config.stack_conv_predictor_conv_kernel_sizes != -1
num_convs = len(self.config.stack_conv_predictor_conv_kernel_sizes) if stack_conv else None
if cross:
video_similarity = torch.einsum("md,nld->mnl", video_query, video_feat)
sub_similarity = torch.einsum("md,nld->mnl", sub_query, sub_feat)
similarity = (video_similarity + sub_similarity) / 2 # (Nq, Nv, L) from query to all videos.
n_q, n_c, l = similarity.shape
similarity = similarity.view(n_q * n_c, 1, l)
if not stack_conv:
st_prob = self.merged_st_predictor(similarity).view(n_q, n_c, l) # (Nq, Nv, L)
ed_prob = self.merged_ed_predictor(similarity).view(n_q, n_c, l) # (Nq, Nv, L)
else:
st_prob_list = []
ed_prob_list = []
for idx in range(num_convs):
st_prob_list.append(self.merged_st_predictors[idx](similarity).squeeze().unsqueeze(2))
ed_prob_list.append(self.merged_ed_predictors[idx](similarity).squeeze().unsqueeze(2))
# (Nq*Nv, L, 3) --> (Nq*Nv, L) -> (Nq, Nv, L)
st_prob = self.combine_st_conv(torch.cat(st_prob_list, dim=2)).view(n_q, n_c, l)
ed_prob = self.combine_ed_conv(torch.cat(ed_prob_list, dim=2)).view(n_q, n_c, l)
else:
video_similarity = torch.einsum("bd,bld->bl", video_query, video_feat) # (N, L)
sub_similarity = torch.einsum("bd,bld->bl", sub_query, sub_feat) # (N, L)
similarity = (video_similarity + sub_similarity) / 2
if not stack_conv:
st_prob = self.merged_st_predictor(similarity.unsqueeze(1)).squeeze() # (N, L)
ed_prob = self.merged_ed_predictor(similarity.unsqueeze(1)).squeeze() # (N, L)
else:
st_prob_list = []
ed_prob_list = []
for idx in range(num_convs):
st_prob_list.append(self.merged_st_predictors[idx](similarity.unsqueeze(1)).squeeze().unsqueeze(2))
ed_prob_list.append(self.merged_ed_predictors[idx](similarity.unsqueeze(1)).squeeze().unsqueeze(2))
st_prob = self.combine_st_conv(torch.cat(st_prob_list, dim=2)).squeeze() # (N, L, 3) --> (N, L)
ed_prob = self.combine_ed_conv(torch.cat(ed_prob_list, dim=2)).squeeze() # (N, L, 3) --> (N, L)
st_prob = mask_logits(st_prob, context_mask) # (N, L)
ed_prob = mask_logits(ed_prob, context_mask)
if return_similaity:
assert not cross
return st_prob, ed_prob, similarity, video_similarity, sub_similarity
else:
return st_prob, ed_prob
def get_st_ed_prob(self, modularied_query, context_feat2, context_mask,
module_name="video", cross=False):
return self._get_st_ed_prob(modularied_query, context_feat2, context_mask,
module_query_linear=getattr(self, module_name + "_query_linear"),
st_predictor=getattr(self, module_name + "_st_predictor"),
ed_predictor=getattr(self, module_name + "_ed_predictor"),
cross=cross)
def _get_st_ed_prob(self, modularied_query, context_feat2, context_mask,
module_query_linear, st_predictor, ed_predictor, cross=False):
"""
Args:
modularied_query: (N, D)
context_feat2: (N, L, D), output of the first transformer encoder layer
context_mask: (N, L)
module_query_linear:
st_predictor:
ed_predictor:
cross: at inference, calculate prob for each possible pairs of query and context.
"""
query = module_query_linear(modularied_query) # (N, D) no need to normalize here.
if cross:
if self.config.span_predictor_type == "conv":
similarity = torch.einsum("md,nld->mnl", query, context_feat2) # (Nq, Nv, L) from query to all videos.
n_q, n_c, l = similarity.shape
similarity = similarity.view(n_q * n_c, 1, l)
st_prob = st_predictor(similarity).view(n_q, n_c, l) # (Nq, Nv, L)
ed_prob = ed_predictor(similarity).view(n_q, n_c, l) # (Nq, Nv, L)
elif self.config.span_predictor_type == "cat_linear":
st_prob_q = st_predictor[0](query).unsqueeze(1) # (Nq, 1, 1)
st_prob_ctx = st_predictor[1](context_feat2).squeeze().unsqueeze(0) # (1, Nv, L)
st_prob = st_prob_q + st_prob_ctx # (Nq, Nv, L)
ed_prob_q = ed_predictor[0](query).unsqueeze(1) # (Nq, 1, 1)
ed_prob_ctx = ed_predictor[1](context_feat2).squeeze().unsqueeze(0) # (1, Nv, L)
ed_prob = ed_prob_q + ed_prob_ctx # (Nq, Nv, L)
context_mask = context_mask.unsqueeze(0) # (1, Nv, L)
else:
if self.config.span_predictor_type == "conv":
similarity = torch.einsum("bd,bld->bl", query, context_feat2) # (N, L)
st_prob = st_predictor(similarity.unsqueeze(1)).squeeze() # (N, L)
ed_prob = ed_predictor(similarity.unsqueeze(1)).squeeze() # (N, L)
elif self.config.span_predictor_type == "cat_linear":
# avoid concatenation by break into smaller matrix multiplications.
st_prob = st_predictor[0](query) + st_predictor[1](context_feat2).squeeze() # (N, L)
ed_prob = ed_predictor[0](query) + ed_predictor[1](context_feat2).squeeze() # (N, L)
st_prob = mask_logits(st_prob, context_mask) # (N, L)
ed_prob = mask_logits(ed_prob, context_mask)
return st_prob, ed_prob
def get_pred_from_raw_query(self, query_feat, query_mask,
video_feat1, video_feat2, video_mask,
sub_feat1, sub_feat2, sub_mask, cross=False):
"""
Args:
query_feat: (N, Lq, Dq)
query_mask: (N, Lq)
video_feat1: (N, Lv, D) or None
video_feat2:
video_mask: (N, Lv)
sub_feat1: (N, Lv, D) or None
sub_feat2:
sub_mask: (N, Lv)
cross:
"""
video_query, sub_query = self.encode_query(query_feat, query_mask)
divisor = self.use_sub + self.use_video
# get video-level retrieval scores
video_q2ctx_scores = self.get_video_level_scores(video_query, video_feat1, video_mask) if self.use_video else 0
sub_q2ctx_scores = self.get_video_level_scores(sub_query, sub_feat1, sub_mask) if self.use_sub else 0
q2ctx_scores = (video_q2ctx_scores + sub_q2ctx_scores) / divisor # (N, N)
if self.config.merge_two_stream and self.use_video and self.use_sub:
st_prob, ed_prob = self.get_merged_st_ed_prob(
video_query, video_feat2, sub_query, sub_feat2, video_mask, cross=cross)
else:
video_st_prob, video_ed_prob = self.get_st_ed_prob(
video_query, video_feat2, video_mask, module_name="video", cross=cross) if self.use_video else (0, 0)
sub_st_prob, sub_ed_prob = self.get_st_ed_prob(
sub_query, sub_feat2, sub_mask, module_name="sub", cross=cross) if self.use_sub else (0, 0)
st_prob = (video_st_prob + sub_st_prob) / divisor # (N, Lv)
ed_prob = (video_ed_prob + sub_ed_prob) / divisor # (N, Lv)
return q2ctx_scores, st_prob, ed_prob # un-normalized masked probabilities!!!!!
def get_video_level_loss(self, query_context_scores):
""" ranking loss between (pos. query + pos. video) and (pos. query + neg. video) or (neg. query + pos. video)
Args:
query_context_scores: (N, N), cosine similarity [-1, 1],
Each row contains the scores between the query to each of the videos inside the batch.
"""
bsz = len(query_context_scores)
diagonal_indices = torch.arange(bsz).to(query_context_scores.device)
pos_scores = query_context_scores[diagonal_indices, diagonal_indices] # (N, )
query_context_scores_masked = copy.deepcopy(query_context_scores.data)
# impossibly large for cosine similarity, the copy is created as modifying the original will cause error
query_context_scores_masked[diagonal_indices, diagonal_indices] = 999
pos_query_neg_context_scores = self.get_neg_scores(query_context_scores,
query_context_scores_masked)
neg_query_pos_context_scores = self.get_neg_scores(query_context_scores.transpose(0, 1),
query_context_scores_masked.transpose(0, 1))
loss_neg_ctx = self.get_ranking_loss(pos_scores, pos_query_neg_context_scores)
loss_neg_q = self.get_ranking_loss(pos_scores, neg_query_pos_context_scores)
return loss_neg_ctx, loss_neg_q
def get_neg_scores(self, scores, scores_masked):
"""
scores: (N, N), cosine similarity [-1, 1],
Each row are scores: query --> all videos. Transposed version: video --> all queries.
scores_masked: (N, N) the same as scores, except that the diagonal (positive) positions
are masked with a large value.
"""
bsz = len(scores)
batch_indices = torch.arange(bsz).to(scores.device)
_, sorted_scores_indices = torch.sort(scores_masked, descending=True, dim=1)
sample_min_idx = 1 # skip the masked positive
sample_max_idx = min(sample_min_idx + self.config.hard_pool_size, bsz) \
if self.config.use_hard_negative else bsz
sampled_neg_score_indices = sorted_scores_indices[
batch_indices, torch.randint(sample_min_idx, sample_max_idx, size=(bsz,)).to(scores.device)] # (N, )
sampled_neg_scores = scores[batch_indices, sampled_neg_score_indices] # (N, )
return sampled_neg_scores
def get_ranking_loss(self, pos_score, neg_score):
""" Note here we encourage positive scores to be larger than negative scores.
Args:
pos_score: (N, ), torch.float32
neg_score: (N, ), torch.float32
"""
if self.config.ranking_loss_type == "hinge": # max(0, m + S_neg - S_pos)
return torch.clamp(self.config.margin + neg_score - pos_score, min=0).sum() / len(pos_score)
elif self.config.ranking_loss_type == "lse": # log[1 + exp(S_neg - S_pos)]
return torch.log1p(torch.exp(neg_score - pos_score)).sum() / len(pos_score)
else:
raise NotImplementedError("Only support 'hinge' and 'lse'")
def mask_logits(target, mask):
return target * mask + (1 - mask) * (-1e10)
| UTF-8 | Python | false | false | 35,047 | py | 74 | model_xml.py | 59 | 0.56293 | 0.553371 | 0 | 641 | 53.675507 | 120 |
zzjlogin/mydoc | 14,654,428,431,666 | 6b2d4da667b54d758a8574d359ce4e7aefeeef2b | a1a0d4ea9cd8d84b9e85fff3f56ae3ec605cf4a6 | /source/demo/argparse/class/07-formatter_class.py | 7776852d85ca9e0463f40cdd54fc23db5364a5ac | [] | no_license | https://github.com/zzjlogin/mydoc | 20007b85a44950d07530c18011b4108af83e3c7a | abe0b08c3b6acf8508152f80ab07915441ab3aab | refs/heads/master | 2023-06-04T07:31:09.812638 | 2021-06-19T19:15:41 | 2021-06-19T19:15:41 | 146,100,219 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
parser = argparse.ArgumentParser(
prog="test",
description="测试formatter_class",
epilog="这里是epilog信息",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--foo', type=int, default=42, help='FOO!')
parser.add_argument('bar', nargs='*', default=[1, 2, 3], help='BAR!')
print("\n默认(argparse.ArgumentDefaultsHelpFormatter)的格式化输出\n")
parser.print_help()
parser.formatter_class=argparse.RawDescriptionHelpFormatter
print("\n(argparse.RawDescriptionHelpFormatter¶)的格式化输出\n")
parser.print_help()
parser.formatter_class=argparse.RawTextHelpFormatter
print("\n(argparse.RawTextHelpFormatter)的格式化输出\n")
parser.print_help()
| UTF-8 | Python | false | false | 737 | py | 568 | 07-formatter_class.py | 29 | 0.760997 | 0.753666 | 0 | 22 | 29.909091 | 69 |
Ledeor/ISU-2-CIM-Converter | 16,501,264,352,710 | 45426971c7f195e120223dffea1ef98fe55da250 | faf1b94fc64b6b58ada9de2a7353b5c9c19bc53c | /cimLocation.py | b40c110b13cdf13ef2667dedca5ee7386f48726b | [] | no_license | https://github.com/Ledeor/ISU-2-CIM-Converter | 1b9963955a10c91b5255f60a4148cd71dc2e63dc | bf693040d347579aa7b1c9e9dc2160efb78ccb14 | refs/heads/master | 2021-01-17T13:18:58.808582 | 2016-06-21T12:19:13 | 2016-06-21T12:19:13 | 59,199,157 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #-------------------------------------------------------------------------------
# Name: module2
# Purpose:
#
# Author: roddi
#
# Created: 17.03.2016
# Copyright: (c) roddi 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import sys
#sys.path.append('./DOM')
import cimIdentifiedObject
import serialization
class TownDetail:
def __init__(self, name, code, stateOrProvince):
self.name = name
self.code = code
self.stateOrProvince = stateOrProvince
class StreetDetail:
def __init__(self, name, number):
self.name = name
self.number = number
class StreetAddress:
def __init__(self, streetName, streetNr, townName, townCode, state):
self.streetDetail = StreetDetail(streetName, streetNr)
self.townDetail = TownDetail(townName, townCode, state)
class Location(cimIdentifiedObject.IdentifiedObject):
def __init__(self, mRID):
cimIdentifiedObject.IdentifiedObject.__init__(self, mRID)
self.mainAddress = StreetAddress("", "", "", "", "")
self.secondaryAddress = ""
def setMainAddress(self, streetName, streetNr, townName, townCode, stateOrProvince):
self.mainAddress.streetDetail.name = serialization.serialEncode(streetName)
self.mainAddress.streetDetail.number = serialization.serialEncode(streetNr)
self.mainAddress.townDetail.name = serialization.serialEncode(townName)
self.mainAddress.townDetail.code = serialization.serialEncode(townCode)
self.mainAddress.townDetail.stateOrProvince = serialization.serialEncode(stateOrProvince)
def setSecondaryAddress(self, sAddr):
self.secondaryAddress = serialization.serialEncode(sAddr)
def getMainAddress(self):
mAddrL = []
mAddrL.append(self.mainAddress.streetDetail.name)
mAddrL.append(self.mainAddress.streetDetail.number)
mAddrL.append(self.mainAddress.townDetail.name)
mAddrL.append(self.mainAddress.townDetail.code)
mAddrL.append(self.mainAddress.townDetail.stateOrProvince)
return mAddrL
def serialize(self):
sContent = cimIdentifiedObject.IdentifiedObject.serialize(self)
sMainAddress = serialization.serialIndent + serialization.serialIndent + "<cim:Location.mainAddress>"
sMainAddress = sMainAddress + ",".join(self.getMainAddress())
sMainAddress = sMainAddress + "</cim:Location.mainAddress>" + '\n'
return sContent + sMainAddress | UTF-8 | Python | false | false | 2,597 | py | 15 | cimLocation.py | 14 | 0.640354 | 0.635348 | 0 | 65 | 37.984615 | 109 |
KB-perByte/CodePedia | 16,192,026,752,663 | bd335dd5e4d6a0878772f99c25037e64f2b2762b | 89d230ad44d17b18897da507725b0a10c32960d8 | /Gen2_0_PP/Assignment/leetcode_combinationSumIII.py | ee799f729513789eb1e3df5026cd4a5a306b41af | [] | no_license | https://github.com/KB-perByte/CodePedia | aeeae87b56cf0ff6e02200cfd6b34da42a007338 | 287e7a3ce981bbf594436cdc06dde23a02b53bb0 | refs/heads/master | 2021-06-19T07:32:53.849871 | 2021-01-23T16:17:27 | 2021-01-23T16:17:27 | 163,250,017 | 0 | 1 | null | false | 2020-03-21T14:39:36 | 2018-12-27T05:13:55 | 2020-03-21T14:39:13 | 2020-03-21T14:39:34 | 119 | 0 | 0 | 2 | JavaScript | false | false | class Solution:
def __init__(self):
self.answer = set()
def helper(self,k,n,ans=set()):
if n<0 or k<0: return
elif n==0 and k==0:
self.answer.add(tuple(sorted(list(ans))))
return
elif n==0 and k>0:
return
for i in range(1,10):
if i not in ans and n>=i and k>0:
self.helper(k-1,n-i,ans|{i})
def combinationSum3(self, k: int, n: int) -> List[List[int]]:
self.helper(k,n)
return self.answer | UTF-8 | Python | false | false | 519 | py | 435 | leetcode_combinationSumIII.py | 419 | 0.499037 | 0.475915 | 0 | 16 | 31.5 | 65 |
google/clusterfuzz | 8,598,524,543,103 | 6e39e310d65e86955105d84791c61dddb2e81c80 | 3afe7348e830a0c5139fb7cf393736e18b59ab4a | /src/clusterfuzz/_internal/bot/tasks/task_creation.py | 83a2a88710604c699acae0d0d0aedc3c31c66490 | [
"Apache-2.0"
] | permissive | https://github.com/google/clusterfuzz | 00845899e081dbbb89b70a75ce0b7eba3da73b02 | 6501a839b27a264500244f32bace8bee4d5cb9a2 | refs/heads/master | 2023-09-03T17:34:17.821599 | 2023-09-01T16:11:51 | 2023-09-01T16:11:51 | 168,060,021 | 5,420 | 639 | Apache-2.0 | false | 2023-09-13T16:40:54 | 2019-01-29T00:19:40 | 2023-09-13T14:30:24 | 2023-09-13T16:40:53 | 95,720 | 5,077 | 538 | 258 | Python | false | false | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions for task creation for test cases."""
from clusterfuzz._internal.base import bisection
from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.build_management import build_manager
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
def mark_unreproducible_if_flaky(testcase, potentially_flaky):
"""Check to see if a test case appears to be flaky."""
task_name = environment.get_value('TASK_NAME')
# If this run does not suggest that we are flaky, clear the flag and assume
# that we are reproducible.
if not potentially_flaky:
testcase.set_metadata('potentially_flaky', False)
return
# If we have not been marked as potentially flaky in the past, don't mark
# mark the test case as unreproducible yet. It is now potentially flaky.
if not testcase.get_metadata('potentially_flaky'):
testcase.set_metadata('potentially_flaky', True)
# In this case, the current task will usually be in a state where it cannot
# be completed. Recreate it.
tasks.add_task(task_name, testcase.key.id(), testcase.job_type)
return
# At this point, this test case has been flagged as potentially flaky twice.
# It should be marked as unreproducible. Mark it as unreproducible, and set
# fields that cannot be populated accordingly.
if task_name == 'minimize' and not testcase.minimized_keys:
testcase.minimized_keys = 'NA'
if task_name in ['minimize', 'impact']:
testcase.set_impacts_as_na()
if task_name in ['minimize', 'regression']:
testcase.regression = 'NA'
if task_name in ['minimize', 'progression']:
testcase.fixed = 'NA'
testcase.one_time_crasher_flag = True
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
'Testcase appears to be flaky')
# Issue update to flip reproducibility label is done in App Engine cleanup
# cron. This avoids calling the issue tracker apis from GCE.
# For unreproducible testcases, it is still beneficial to get component
# information from blame task.
create_blame_task_if_needed(testcase)
# Let bisection service know about flakiness.
bisection.request_bisection(testcase)
def create_blame_task_if_needed(testcase):
"""Creates a blame task if needed."""
# Blame doesn't work for non-chromium projects.
if not utils.is_chromium():
return
# Blame is only applicable to chromium project, otherwise bail out.
if testcase.project_name != 'chromium':
return
# We cannot run blame job for custom binaries since we don't have any context
# on the crash revision and regression range.
if build_manager.is_custom_binary():
return
# Don't send duplicate issues to Predator. This causes issues with metrics
# tracking and wastes cycles.
if testcase.status == 'Duplicate':
return
create_task = False
if testcase.one_time_crasher_flag:
# For unreproducible testcases, it is still beneficial to get component
# information from blame task.
create_task = True
else:
# Reproducible testcase.
# Step 1: Check if the regression task finished. If not, bail out.
if not testcase.regression:
return
# Step 2: Check if the symbolize task is applicable and finished. If not,
# bail out.
if build_manager.has_symbolized_builds() and not testcase.symbolized:
return
create_task = True
if create_task:
tasks.add_task('blame', testcase.key.id(), testcase.job_type)
def create_impact_task_if_needed(testcase):
"""Creates an impact task if needed."""
# Impact doesn't make sense for non-chromium projects.
if not utils.is_chromium():
return
# Impact is only applicable to chromium project, otherwise bail out.
if testcase.project_name != 'chromium':
return
# We cannot run impact job for custom binaries since we don't have any
# archived production builds for these.
if build_manager.is_custom_binary():
return
tasks.add_task('impact', testcase.key.id(), testcase.job_type)
def create_minimize_task_if_needed(testcase):
"""Creates a minimize task if needed."""
tasks.add_task('minimize', testcase.key.id(), testcase.job_type)
def create_regression_task_if_needed(testcase):
"""Creates a regression task if needed."""
# We cannot run regression job for custom binaries since we don't have any
# archived builds for previous revisions. We only track the last uploaded
# custom build.
if build_manager.is_custom_binary():
return
tasks.add_task('regression', testcase.key.id(), testcase.job_type)
def create_variant_tasks_if_needed(testcase):
"""Creates a variant task if needed."""
if testcase.duplicate_of:
# If another testcase exists with same params, no need to spend cycles on
# calculating variants again.
return
testcase_id = testcase.key.id()
project = data_handler.get_project_name(testcase.job_type)
jobs = data_types.Job.query(data_types.Job.project == project)
testcase_job_is_engine = environment.is_engine_fuzzer_job(testcase.job_type)
testcase_job_app_name = None
if not testcase_job_is_engine:
testcase_job = (
data_types.Job.query(data_types.Job.name == testcase.job_type).get())
testcase_job_environment = testcase_job.get_environment()
testcase_job_app_name = testcase_job_environment.get('APP_NAME')
num_variant_tasks = 0
for job in jobs:
# The variant needs to be tested in a different job type than us.
job_type = job.name
if testcase.job_type == job_type:
continue
# Don't try to reproduce engine fuzzer testcase with blackbox fuzzer
# testcases and vice versa.
if testcase_job_is_engine != environment.is_engine_fuzzer_job(job_type):
continue
# Skip experimental jobs.
job_environment = job.get_environment()
if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
continue
# Skip jobs for which variant tasks are disabled.
if utils.string_is_true(job_environment.get('DISABLE_VARIANT')):
continue
if (not testcase_job_is_engine and
job_environment.get('APP_NAME') != testcase_job_app_name):
continue
queue = tasks.queue_for_platform(job.platform)
tasks.add_task('variant', testcase_id, job_type, queue)
variant = data_handler.get_or_create_testcase_variant(testcase_id, job_type)
variant.status = data_types.TestcaseVariantStatus.PENDING
variant.put()
num_variant_tasks += 1
logs.log(f'Number of variant tasks: {num_variant_tasks}.')
def create_symbolize_task_if_needed(testcase):
"""Creates a symbolize task if needed."""
# We cannot run symbolize job for custom binaries since we don't have any
# archived symbolized builds.
if build_manager.is_custom_binary():
return
# Make sure we have atleast one symbolized url pattern defined in job type.
if not build_manager.has_symbolized_builds():
return
tasks.add_task('symbolize', testcase.key.id(), testcase.job_type)
def create_tasks(testcase):
"""Create tasks like minimization, regression, impact, progression, stack
stack for a newly generated testcase."""
# No need to create progression task. It is automatically created by the cron
# handler for reproducible testcases.
# For a non reproducible crash.
if testcase.one_time_crasher_flag:
# For unreproducible testcases, it is still beneficial to get component
# information from blame task.
create_blame_task_if_needed(testcase)
return
# For a fully reproducible crash.
# MIN environment variable defined in a job definition indicates if
# we want to do the heavy weight tasks like minimization, regression,
# impact, etc on this testcase. These are usually skipped when we have
# a large timeout and we can't afford to waste more than a couple of hours
# on these jobs.
testcase_id = testcase.key.id()
if environment.get_value('MIN') == 'No':
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.minimized_keys = 'NA'
testcase.regression = 'NA'
testcase.set_impacts_as_na()
testcase.put()
return
# Just create the minimize task for now. Once minimization is complete, it
# automatically created the rest of the needed tasks.
create_minimize_task_if_needed(testcase)
| UTF-8 | Python | false | false | 9,027 | py | 984 | task_creation.py | 510 | 0.723607 | 0.722278 | 0 | 245 | 35.844898 | 80 |
chung1905/skynet-chatbot | 15,195,594,313,800 | 7b7fda16f1d5259f710c135a5579181a6e3348f1 | 15d0b30acfce59191ddd0006449d70f0b7e77470 | /browsers/phantomjs.py | 5fdde598fd84b7fbdcff1f91169426737e3bf6fc | [
"MIT"
] | permissive | https://github.com/chung1905/skynet-chatbot | 700db890bf31c0260d48474328b2d0e5d830163e | 932a45f81f1bb78a49d1c968d325319fd3c4cc73 | refs/heads/master | 2022-08-08T23:00:58.800298 | 2020-04-17T11:09:10 | 2020-04-17T11:09:10 | 256,136,980 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from selenium import webdriver
from os import path
def get_browser(system: str, root_dir: str) -> webdriver.Firefox:
executable_path = path.abspath(root_dir + '/browsers/phantomjs/' + system + '/bin/phantomjs')
return webdriver.PhantomJS(executable_path=executable_path)
| UTF-8 | Python | false | false | 281 | py | 9 | phantomjs.py | 7 | 0.747331 | 0.747331 | 0 | 7 | 39.142857 | 97 |
lderazo1/social_distancing | 111,669,169,437 | 92cdfdb8373d9f4babf0cb102bb017caded40820 | 9f60ec08a351b9b91117c2f7db220bc35c8bf715 | /variables_globales.py | 4fde2269dc1f58e1a000df6de83c81de8c64163b | [] | no_license | https://github.com/lderazo1/social_distancing | 07949dd32dc6441aa99fae9fbc6f158bb2a746b8 | 2a08e1b537ffdb3f408cc1759319d199d76cec10 | refs/heads/master | 2023-06-01T03:42:13.058619 | 2021-06-29T16:47:00 | 2021-06-29T16:47:00 | 381,424,817 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | DISTANCIA = 150 #Distancia segura
#CARGA YOLO V3
YOLOV3_NOMBRES = './yolov3_library/coco.names'
YOLOV3_CONFIGURACIONES = './yolov3_library/yolov3.cfg'
YOLOV3_PESOS = './yolov3_library/yolov3.weights'
#VIDEOS Y SALIDAS
VIDEO_PRUEBA = './videos_prueba/video5.mp4'
SALIDA = './procesado/resultado5.avi' | UTF-8 | Python | false | false | 299 | py | 4 | variables_globales.py | 2 | 0.759197 | 0.70903 | 0 | 8 | 36.5 | 54 |
cormackikkert/competitive-programming | 7,069,516,191,075 | d86c1c3fdb542e5aefc2198f708eeb05b272296f | ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a | /AtCoder/Beginner 150/C.py | e2d9b1e6622e0ce3f06c34b266df79f54e57df6e | [] | no_license | https://github.com/cormackikkert/competitive-programming | f3fa287fcb74248ba218ecd763f8f6df31d57424 | 3a1200b8ff9b6941c422371961a127d7be8f2e00 | refs/heads/master | 2022-12-17T02:02:40.892608 | 2020-09-20T11:47:15 | 2020-09-20T11:47:15 | 266,775,265 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | N = int(input())
P1 = tuple(map(int, input().split()))
P2 = tuple(map(int, input().split()))
import itertools
a = 0
b = 0
for perm in itertools.permutations([i+1 for i in range(N)]):
if perm <= P1:
a += 1
if perm <= P2:
b += 1
print(abs(b - a)) | UTF-8 | Python | false | false | 269 | py | 867 | C.py | 775 | 0.550186 | 0.516729 | 0 | 13 | 19.769231 | 60 |
CapaFenLisesi/KrauthCourse | 6,854,767,815,157 | 26d055de1ded928f92900cd53129cea49aa7ac09 | a246c42ec6a57b06c3b9aee9021fbc3565c4ef01 | /hw_5/A2.py | e736759e7b093687668ed9f82a857baf0b5d4fca | [] | no_license | https://github.com/CapaFenLisesi/KrauthCourse | 478f961657482c5647d8a5dbcc3f4b7a642dffc9 | 83f9cabb2bd69d46dc876a7f6d7eb5af22946c4a | refs/heads/master | 2021-01-22T10:46:53.124079 | 2016-04-23T18:05:08 | 2016-04-23T18:05:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random, math,pylab
import numpy as np
def psi_n_square(x, n):
if n == -1:
return 0.0
else:
psi = [math.exp(-x ** 2 / 2.0) / math.pi ** 0.25]
psi.append(math.sqrt(2.0) * x * psi[0])
for k in range(2, n + 1):
psi.append(math.sqrt(2.0 / k) * x * psi[k - 1] -
math.sqrt((k - 1.0) / k) * psi[k - 2])
return psi[n] ** 2
beta=5.0
locations=[]
ns=[]
x = 0.0001
delta = 0.5
n=1
for k in range(100000):
x_new = x + random.uniform(-delta, delta)
if random.uniform(0.0, 1.0) < \
psi_n_square(x_new,n)/psi_n_square(x,n):
x = x_new
n_new=n + random.choice([-1,1])
if n_new >=0 and random.uniform(0.0,1.0) < psi_n_square(x,n_new)/psi_n_square(x,n)*np.exp(-beta*(n_new-n)):
n=n_new
locations.append(x)
ns.append(n)
xrange=np.arange(-10,10,.1)
pylab.hist(locations,normed=True,label='Histogram')
pylab.plot(xrange, np.sqrt(np.tanh(beta/2) / np.pi)*np.exp( - xrange**2 * np.tanh(beta/2) ),label='pi_quant')
pylab.plot(xrange, np.sqrt(beta/2/ np.pi)*np.exp( - beta * xrange**2/2 ),label='pi_class')
pylab.xlabel('x')
pylab.ylabel('Probability')
pylab.title('Probability to be at location x for beta='+str(beta))
pylab.legend()
pylab.show()
| UTF-8 | Python | false | false | 1,298 | py | 2 | A2.py | 2 | 0.55624 | 0.510786 | 0 | 46 | 27.195652 | 111 |
monk-ee/NHDH | 8,349,416,428,559 | 2268ead62fc3055eef95fa9ee8105fc13f8c469a | 8386d45a367d0ba5d330e036a13833973fc260b2 | /NHDH/modules/py_email.py | ae5081c7359f7db3df60c1428f2bd86891a1d8a5 | [
"Apache-2.0"
] | permissive | https://github.com/monk-ee/NHDH | dbee24c6225519a02719d4aa229886760bc5b558 | 93c449bf3135386ecdd7e4ba719446460f54e32c | refs/heads/master | 2021-01-23T13:32:00.024644 | 2014-06-27T05:35:35 | 2014-06-27T05:35:35 | 14,386,258 | 3 | 2 | null | false | 2014-08-21T05:50:30 | 2013-11-14T05:39:38 | 2014-06-27T00:31:02 | 2014-06-27T05:35:37 | 29,448 | 1 | 2 | 4 | JavaScript | null | null | import smtplib
from NHDH import app
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def py_email(SUBJECT, BODY):
"""With this function we send out our html email"""
for recipient in app.config['CONFIG']['recipients']:
# Create message container - the correct MIME type is multipart/alternative here!
MESSAGE = MIMEMultipart('alternative')
MESSAGE['subject'] = SUBJECT
MESSAGE['To'] = recipient['address']
MESSAGE['From'] = str(app.config['CONFIG']['smtp']['sender_address'])
MESSAGE.preamble = """
Your mail reader does not support the report format.
Please visit us <a href="http://www.mysite.com">online</a>!"""
# Record the MIME type text/html.
HTML_BODY = MIMEText(BODY, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
MESSAGE.attach(HTML_BODY)
# The actual sending of the e-mail
server = smtplib.SMTP(app.config['CONFIG']['smtp']['server']+':'+app.config['CONFIG']['smtp']['port'])
# Print debugging output when testing
if __name__ == "__main__":
server.set_debuglevel(1)
server.starttls()
server.login(app.config['CONFIG']['smtp']['user'],app.config['CONFIG']['smtp']['password'])
server.sendmail(str(app.config['CONFIG']['smtp']['sender_address']), [recipient['address']], MESSAGE.as_string())
server.quit() | UTF-8 | Python | false | false | 1,576 | py | 29 | py_email.py | 17 | 0.631345 | 0.628173 | 0 | 38 | 40.5 | 121 |
zhencliu/turtle | 17,291,538,350,332 | c6e6d8d0e54e3e7c75e405593564901d50eae972 | 0a6b3ccafcafa517c3505d46c45adc3cb5e5511d | /arithmetic/expression.py | 6ac69024ed124c5c3c64ac885b66c6999e82903b | [] | no_license | https://github.com/zhencliu/turtle | 3fba0d4e1bf879c21f820cd8d766964422d0bc86 | 6142566a5967be17b616b3db153136b4b521b814 | refs/heads/master | 2020-09-23T01:01:07.141612 | 2019-12-02T11:40:51 | 2019-12-12T06:29:52 | 225,360,867 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
class Expression(object):
def __init__(self, min, max, operators):
self._min = min
self._max = max
self._operators = operators.split()
self.expr = None
self.result = None
def gen_expr(self):
x, y = random.sample(range(self._min, self._max), 2)
oper = random.choice(self._operators)
expr = '{x} {oper} {y}'
result = eval(expr.format(x=x, oper=oper, y=y))
(x, y) = (y, x) if result < 0 else (x, y)
print(x, y)
self.expr = expr.format(x=' '.join([s for s in str(x)]),
oper=oper,
y=' '.join([s for s in str(y)])).split()
self.expr.append('=')
self.expr.append('?')
print(self.expr)
self.result = result if result > 0 else -result
if __name__ == '__main__':
arith = Expression(0, 20, '- +')
arith.gen_expr()
print(self.expr)
print(self.result)
| UTF-8 | Python | false | false | 977 | py | 5 | expression.py | 3 | 0.495394 | 0.489253 | 0 | 33 | 28.575758 | 72 |
JoshAddington/blog | 14,577,119,030,839 | 5876120a7e2151b1d4cef2d694af4767eea4dd90 | f60a837fd5f57f211088b120158c2fe04c314deb | /mysite/citibike/tasks.py | 99b25d0105e7710ad209785ce6c030a7a593cca4 | [] | no_license | https://github.com/JoshAddington/blog | a66acb000acf62f53d65373c2c3260d9e3dfeafa | aaa5211f8da339cee0d07b091d275f8a2fdeeb35 | refs/heads/master | 2021-01-21T18:11:52.880115 | 2015-10-28T12:47:08 | 2015-10-28T12:47:08 | 33,373,280 | 0 | 0 | null | false | 2015-10-28T12:47:08 | 2015-04-03T17:01:02 | 2015-10-14T00:07:16 | 2015-10-28T12:47:08 | 5,076 | 1 | 0 | 0 | HTML | null | null | from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from datetime import datetime
from .models import TaskHistory
from .utils import scrape_citibike_json
logger = get_task_logger(__name__)
# schedule scraper to run every ten minutes
@periodic_task(run_every=(crontab(minute="*/10")), ignore_result=True)
def scrape():
logger.info("Start Citibike Scrape")
now = datetime.now()
date_now = now.strftime("%d-%m-%Y %H:%M:%S")
result = scrape_citibike_json.scrape_json()
name = "citibike_scraper"
taskhistory = TaskHistory.objects.get_or_create(name=name)[0]
taskhistory.history.update({date_now: result})
taskhistory.save()
logger.info("Task finished: result= %s" % result)
| UTF-8 | Python | false | false | 787 | py | 39 | tasks.py | 26 | 0.72554 | 0.721728 | 0 | 22 | 34.772727 | 70 |
saiprakash1916/python-practice | 15,358,803,054,962 | 6cb4b36517073c49d537cfb27989e38ee166ee68 | 16764e6bc4ef3262d681e5f9aeec4aab38a2ed67 | /assignment 4.py | 3b7200a279fd3679b543e37fea7142e32426686d | [] | no_license | https://github.com/saiprakash1916/python-practice | 9877d584cfb01052a5c0e5340d8cc49acff6f0ed | 560c1d382d9bbaab46c12fa3c36fdeab2bf6eae0 | refs/heads/master | 2023-03-19T13:56:09.731518 | 2021-03-17T01:21:21 | 2021-03-17T01:21:21 | 348,541,516 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Write a Python program in which a student enters the number of college credits earned. If
the number of credits is greater than 90, 'Senior Status' is displayed; if greater than 60,
'Junior Status' is displayed; if greater than 30, 'Sophomore Status' is displayed; else,
'Freshman Status' is displayed."""
credits = int(input("Enter the credits: "))
if (credits>=90):
print("Senior status")
elif (credits>=60):
print("Junior status")
elif (credits>=30):
print("Sophomore status")
else:
print("Freshman status") | UTF-8 | Python | false | false | 529 | py | 50 | assignment 4.py | 49 | 0.720227 | 0.697543 | 0 | 13 | 39.769231 | 92 |
lkry95/Black-Jack | 14,121,852,511,374 | 04f8bdf784c1d1e7322a0dd153beb21780860841 | ede66c9dac612c84efe006efbd270f4aea6dfe86 | /black_jack_game.py | 1f8eec199014f7d4f15c25232dff33769c50aecf | [] | no_license | https://github.com/lkry95/Black-Jack | 99d0366a88ada05c7744b1c90dd10bbe621a37f2 | 8e0f41b92de61e25be7bdcccd6a4d7de668dd250 | refs/heads/main | 2023-02-21T06:48:29.370875 | 2020-12-30T17:44:14 | 2020-12-30T17:44:14 | 317,320,329 | 0 | 4 | null | false | 2020-12-03T20:05:19 | 2020-11-30T19:08:47 | 2020-12-02T14:31:44 | 2020-12-03T20:05:19 | 3 | 0 | 1 | 0 | Python | false | false | import black_jack
def create_deck():
deck_of_cards = black_jack.Deck()
shuffled_cards = deck_of_cards.deck_cards()
return shuffled_cards
def gameplay():
deck = create_deck()
deal_cards = black_jack.Deal(deck)
dealing_cards = deal_cards.deal_cards(deck)
player_hand = dealing_cards[1]
dealer_hand = dealing_cards[0]
deck = dealing_cards[2]
calculation = black_jack.Calculation()
player_points = calculation.point_calc(player_hand)
dealer_points = calculation.point_calc(dealer_hand)
print("This is your hand: ")
print(player_hand)
print(f'Your points are: {player_points}')
print(f"These are the dealer's cards: {dealer_hand[0]} and another hidden card")
game_continue = True
if player_points == 21:
print("You got really lucky! You win!")
game_continue = False
while game_continue:
hit_or_stay = input('Do you want to hit(h) or stay(s)? ')
print(f"These are the dealer's cards: {dealer_hand[0]} and another hidden card")
if hit_or_stay == 'h':
player = black_jack.Player()
player_hit = player.player_hit(player_hand, deck)
# print(player_hit)
player_points = player_hit[0]
print("This is your hand: ")
print(player_hand)
print(f'Your points are: {player_points}')
if player_points == 21:
print("You win!")
print(f'This is the dealer hand {dealer_hand}')
print(f'Dealer points are: {dealer_points}')
game_continue = False
elif player_points > 21:
print("You lose!")
print(f'This is the dealer hand {dealer_hand}')
print(f'Dealer points are: {dealer_points}')
game_continue = False
elif hit_or_stay == 's':
dealer = black_jack.Dealer()
dealer_hit = dealer.dealer_hit(dealer_hand, deck)
dealer_points = dealer_hit[0]
print(f'Your points are: {player_points}')
print(f'This is the dealer hand {dealer_hand}')
print(f'Dealer points are: {dealer_points}')
if player_points > dealer_points:
print('You win!')
game_continue = False
elif dealer_points > 21:
print('You win!')
game_continue = False
elif dealer_points > player_points:
print("You lose!")
game_continue = False
else:
print('The house always wins!')
game_continue = False
def game_loop():
play_again = True
while play_again:
gameplay()
yes_or_no = input("Would you like to play again? ")
if yes_or_no == 'n':
play_again = False
game_loop()
| UTF-8 | Python | false | false | 2,870 | py | 7 | black_jack_game.py | 3 | 0.553659 | 0.548432 | 0 | 87 | 31.988506 | 89 |
VTantillo/tech_kings3 | 7,318,624,290,992 | edb8129cac95cb9b0aaf5e6de12dc89245304f6c | b231404e355f85b9dbdb9299e8307bc0d012ec17 | /tbms/src/sub_workshop/ws_manager.py | fa3bf49c59a6dab6ba535b27df5168c1616d7f75 | [] | no_license | https://github.com/VTantillo/tech_kings3 | e5df98df259ab533dc9deb4bdb6b4c7c624e9f0d | e8f4e8370cf6dff4d8791d496221caa15f652b5b | refs/heads/master | 2021-09-01T21:28:55.724324 | 2017-12-15T23:20:08 | 2017-12-15T23:20:08 | 111,738,303 | 0 | 0 | null | false | 2017-12-15T23:21:39 | 2017-11-22T22:26:01 | 2017-12-15T23:20:13 | 2017-12-15T23:21:39 | 422 | 0 | 0 | 0 | Python | false | null | """
Interface for the workshop subsystem.
"""
import network_adapter
import snapshot
from workshop_unit import WorkshopUnit
from workshop_group import WorkshopGroup
from virtual_machine import VirtualMachine
from src.sub_db.db_manager import WorkshopDB
import workshop_unit
import workshop_group
def create(item_name, fields):
if item_name == 'virtual machine':
# Add to Database
"""if WorkshopDB.update('virtual machine', fields):
# vm in database so update
pass
else:
WorkshopDB.create('virtual machine', fields)"""
return VirtualMachine(fields['name'],
fields['id'],
fields['adapter'],
fields['port'],
fields['recent_snapshot'],
fields['host_ip'])
def read(item_name, item=None):
return WorkshopDB.read(item_name, item)
def update():
pass
def delete():
pass
def clone():
pass
def port():
pass
def convert_query_list_to_wg_instance_list(wg_query_list):
groups = []
for g in wg_query_list:
groups.append(WorkshopGroup(g.id, g.name, g.description, g.status, g.lifetime, g.published_date,
g.server_id))
return groups
def convert_query_list_to_wu_instance_list(wu_query_list):
units = []
for u in wu_query_list:
units.append(WorkshopUnit(u.id, u.name, u.description, 'N/A', u.status, u.lifetime, u.published_date,
u.server_id, u.wg_id))
return units
| UTF-8 | Python | false | false | 1,616 | py | 72 | ws_manager.py | 47 | 0.581683 | 0.581683 | 0 | 65 | 23.861538 | 109 |
heamabc/MyProjects | 12,403,865,556,304 | 3648758546ac6217dfd77d30c7c774cdbc5dd7b7 | 6b1327647debe40d47dd1eb6f7d8c6598e26ad1a | /PortMgmt/Python/UpdatePortfolio/mgmtPort.py | 637978276641b5feb849daa6e8aae5483c71c21e | [] | no_license | https://github.com/heamabc/MyProjects | 0e6dc718646ca9b525a7228c75e0c02a732fae74 | d1fe8d499f6ef09dcfa788f142dcd6463d9ae987 | refs/heads/master | 2021-09-08T04:32:54.296424 | 2018-03-07T01:43:41 | 2018-03-07T01:43:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Manage Portfolio and update table PortPosition
Created on Feb 1, 2017
@author: ywang
'''
import pandas as pd
from pandas import DataFrame, Series
import sqlalchemy
from urllib.parse import quote_plus
import numpy as np
from numpy import datetime64
SQL_PULL_HOLDING_POSITION = '''
SELECT [TradeDate] AS [Date], [Ticker], [Transaction], [Totalshares], [SharePrice]
FROM TransactionData.DFA_401K
ORDER BY TradeDate
'''
SQL_PULL_MF_PRICE = '''
SELECT [Date], [Ticker], [Close]
FROM MutualFundData.DFA_401K
'''
def reindex_by_date(g, dt):
_df = g[['CumShares', 'AverageCost']].copy()
_df.index = g.Date
max_dt = max(datetime64(_df.index.max()), dt.max())
min_dt = datetime64(_df.index.min())
dates = dt[(dt>=min_dt) & (dt<=max_dt)]
return _df.reindex(dates, method='ffill')
def get_cumsum_adj_and_cost(_df):
# cumulative sum of shares, reset the CumShares to 0, if it's too small
# get average cost over time
df_adj = _df.copy()
df_adj['CumShares'] = _df['Totalshares']
df_adj['AverageCost'] = _df['SharePrice']
N = df_adj.index.size
for k in range(1,N):
temp = df_adj.CumShares[df_adj.index[k-1]] + df_adj.Totalshares[df_adj.index[k]]
if np.abs(temp) < 0.01:
temp = 0
df_adj.ix[df_adj.index[k], 'CumShares'] = temp
for k in range(1,N):
transaction_amount = df_adj.ix[df_adj.index[k],'SharePrice'] * df_adj.ix[df_adj.index[k],'Totalshares']
if transaction_amount > 0:
total_cost_prev = df_adj.ix[df_adj.index[k-1], 'AverageCost'] * df_adj.ix[df_adj.index[k-1], 'CumShares']
df_adj.ix[df_adj.index[k], 'AverageCost'] = (total_cost_prev + transaction_amount) / df_adj.ix[df_adj.index[k], 'CumShares']
else:
df_adj.ix[df_adj.index[k], 'AverageCost'] = df_adj.ix[df_adj.index[k-1], 'AverageCost']
return df_adj[['Date', 'Ticker', 'CumShares', 'AverageCost']].reset_index(drop=True)
def resetPort():
pass
def updatePort():
pass
#===============================================================================
# Main Script
#===============================================================================
if __name__== "__main__":
account_name = 'DFA_401K'
################ Pull transaction data
params = quote_plus("DRIVER={SQL Server}; SERVER=ASTJ9K2Y52RESR\SYW_LOCAL_V2014; DATABASE=PortMgmt; Trusted_Connection=yes")
engine = sqlalchemy.create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
df_transaction = pd.read_sql(SQL_PULL_HOLDING_POSITION, engine)
df_transaction.sort_values(by='Date', inplace=True)
# There are multiple transaction type in a given day, must aggregate
df_position = df_transaction.groupby(['Date','Ticker','SharePrice'])[['Totalshares']].sum()
df_position.reset_index(inplace=True)
# Get cumulative position
cum_position_adj = df_position.groupby('Ticker').apply(get_cumsum_adj_and_cost)
df_position = df_position.merge(cum_position_adj, how='left', on=['Date', 'Ticker'])
################## Get Price
start_dt = df_transaction.Date.min()
df_price = pd.read_sql(SQL_PULL_MF_PRICE+'WHERE [Date]>='+'\''+start_dt.strftime('%Y/%m/%d')+'\'', engine)
busdate = df_price.Date.unique()
################# Create daily position ##################
df_pos_daily = df_position.groupby('Ticker').apply(reindex_by_date, dt=busdate)
# sdel df_position
df_pos_daily.reset_index(inplace=True)
df_pos_daily.sort_values(by=['Date','Ticker'], inplace=True)
# add price
df_pos_daily = df_pos_daily.merge(df_price, how='left', on=['Date','Ticker'])
df_pos_daily = df_pos_daily[['Date', 'Ticker', 'CumShares', 'Close', 'AverageCost']]
# Add value=$1 for VMMXX, GVMXX
df_pos_daily.ix[df_pos_daily.Ticker.isin(['GVMXX', 'VMMXX']),'Close'] = 1
# Check if there is any date with missing price
date_price_missing = df_pos_daily[df_pos_daily.Close.isnull()].Date.unique()
# Drop date with missing price
df_pos_daily = df_pos_daily[~df_pos_daily.Date.isin(date_price_missing)]
# Dollar position
df_pos_daily['Amount'] = df_pos_daily['CumShares'] * df_pos_daily['Close']
df_pos_daily.query('CumShares!=0', inplace=True)
################# Contribution ##################
df_contrib = df_transaction.query('Transaction=="ACH Contribution"')
df_contrib = df_contrib.groupby(['Date', 'Ticker']).sum()
df_contrib.reset_index(1, inplace=True)
df_contrib['Date'] = df_contrib.index
df_contrib = df_contrib.merge(df_price, how='left', on=['Date', 'Ticker'])
df_contrib.ix[df_contrib.Ticker.isin(['GVMXX', 'VMMXX']),'Close'] = 1
df_contrib['Amount'] = df_contrib['Totalshares'] * df_contrib['Close']
df_contrib = df_contrib.groupby('Date')[['Amount']].sum()
################# Dividend ##########################
DIV_LIST = ['Ordinary Dividend Reinvestment Increase',
'Daily Accrual Dividend Reinvestment Incr',
'Long Term Capital Gain Reinvestment',
'Short Term Capital Gain Reinvestment',
'Earnings Allocation',
'Increase Earnings']
df_dividend = df_transaction.query('Transaction==@DIV_LIST')
df_dividend = df_dividend.groupby(['Date', 'Ticker'])[['Totalshares']].sum()
df_dividend.reset_index(1, inplace=True)
df_dividend['Date'] = df_dividend.index
df_dividend = df_dividend.merge(df_price, how='left', on=['Date', 'Ticker'])
df_dividend.ix[df_dividend.Ticker.isin(['GVMXX', 'VMMXX']),'Close'] = 1
df_dividend['Amount'] = df_dividend['Totalshares'] * df_dividend['Close']
df_dividend = df_dividend.groupby('Date')[['Amount']].sum()
################# Daily Table #######################
df_portfolio = DataFrame(index=df_pos_daily.Date.unique(), columns=['Balance', 'Contribution', 'Dividend'])
df_portfolio['Balance'] = df_pos_daily.groupby('Date')['Amount'].sum()
df_portfolio['Contribution'] = df_contrib['Amount']
df_portfolio.ix[df_portfolio.Contribution.isnull(), 'Contribution'] = 0
df_portfolio['Dividend'] = df_dividend['Amount']
df_portfolio.ix[df_portfolio.Dividend.isnull(), 'Dividend'] = 0
bal_less_contribution = (df_portfolio['Balance'] - df_portfolio['Contribution']).values
bal_initial = df_portfolio.Balance.values
df_portfolio['Return'] = np.nan
df_portfolio.ix[1:, 'Return'] = bal_less_contribution[1:] / bal_initial[:-1] - 1
################ Commit to database
df_portfolio.to_sql(account_name, engine, schema='Portfolio', if_exists='replace', index_label='Date')
df_pos_daily.index = df_pos_daily.Date
df_pos_daily.drop('Date', axis=1, inplace=True)
df_pos_daily.to_sql(account_name, engine, schema='Position', if_exists='replace', index_label='Date')
| UTF-8 | Python | false | false | 7,132 | py | 28 | mgmtPort.py | 10 | 0.589456 | 0.581884 | 0 | 191 | 36.026178 | 137 |
mehak5868/Training | 2,310,692,432,910 | 640f1e1d7dfe1da9a127de51166ccaa86faed6f3 | 33fb697b05475205881d90a0a56d1dae3afdf5ff | /venv/Session16B.py | 0f7c240a681a9dea61c6c218e5e5d58c46351ea0 | [] | no_license | https://github.com/mehak5868/Training | ca3a0c8640df3efd9481b7e473965e6a123105bc | b40d6c6b6291b332856ee50f583f7fb05ad0a401 | refs/heads/master | 2020-04-29T15:46:37.977067 | 2019-04-02T17:04:39 | 2019-04-02T17:04:39 | 176,239,402 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | file =open("idea.xml","r")
data =file.readlines()
for line in data:
print(line)
file.close()
| UTF-8 | Python | false | false | 100 | py | 81 | Session16B.py | 73 | 0.64 | 0.64 | 0 | 5 | 18.4 | 26 |
dennysaug/algoritmo-kruskal | 15,195,594,312,936 | fa5d3a93859a60e2edabbbafe36b0a0b94599a58 | acd35bdb35a0ce950ecdde72db72a86f42c4ac81 | /main.py | 576d9d1184b74a59c098525212d91ed2783077fe | [] | no_license | https://github.com/dennysaug/algoritmo-kruskal | 5516772d5e36bbc44625ba02e233b0d2abdafc1e | d93f35930b763ff8521ce2d1b25bdea0008f7c3a | refs/heads/master | 2021-08-19T14:49:45.870300 | 2017-11-26T18:44:35 | 2017-11-26T18:44:35 | 112,107,232 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
def main():
continua = True
pesos = []
vertice = []
while continua:
os.system('clear')
print "[*] 1 - Adicionar vertice"
print "[*] 2 - Adicionar arestas com os pesos"
print "[*] 3 - Rodar algoritmo de Kruskal"
print "[*] 4 - Sair\n\n"
op = raw_input('Qual a opcao desejada: ')
if op == '1':
print 'Adicionando vertice. Digite 0 para sair\n\n'
ok = True
while ok:
v = raw_input('Vertice: ')
if v == '0':
break
vertice.append(v)
if op == '2':
print vertice
print 'Adicionando aresta com os pesos. Digite 0 para sair\n\n'
print 'Exemplo:\nAresta: A-B\nPeso: 5\n\n'
ok = True
while ok:
a = raw_input('Aresta: ')
p = raw_input('Peso: ')
print "\n"
if a == '0' or p == '0':
break
dados = {'a': a, 'p': p}
pesos.append(dados)
if op == '3':
print 'Rodando o algoritmo de Kruskal\n\n'
pesos = sorted(pesos, key=lambda x: x['p'])
for peso in pesos:
print peso['a'] + ': ' + str(peso['p'])
return 0
if op == '4':
print 'Saindo...\n\n'
return 0
return 0
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 1,477 | py | 1 | main.py | 1 | 0.417062 | 0.405552 | 0 | 64 | 22.09375 | 75 |
mik-79-ekb/Python_start | 15,101,105,032,739 | fa922ff31715521b6763d50cc6cac79d3d25b63a | 5ab30e31fbfd7e62551e62ddbe2b74af7975012a | /Lesson_7/HW_7.3.py | 0decebf0318d1ce439d36159952cd175d08d0384 | [] | no_license | https://github.com/mik-79-ekb/Python_start | 141d080eae7a68ec582586ba98893715efaa0972 | f313fa0aca3fe2b40d29cb7bc877a5d98840ef0c | refs/heads/main | 2023-01-23T02:41:19.387760 | 2020-12-12T17:53:59 | 2020-12-12T17:53:59 | 308,719,179 | 1 | 0 | null | false | 2020-12-12T17:54:00 | 2020-10-30T18:58:02 | 2020-11-30T17:29:26 | 2020-12-12T17:53:59 | 23 | 0 | 0 | 0 | Python | false | false | """
Task 7.3
"""
class Kletka:
def __init__(self, index):
self.index = int(index)
def __add__(self, other):
return f'Клетка увеличилась, ее размер стал: {self.index + other.index}'
def __sub__(self, other):
return f'Клетка уменьшилась, ее размер стал: {self.index - other.index}' if self.index - other.index > 0 else f'Уменьшение клетки невозможно!'
def __mul__(self, other):
return f'Клетка разрослась, ее размер стал: {self.index * other.index}'
def __truediv__(self, other):
return f'Клетка разделилась, ее размер стал: {self.index // other.index}'
def make_order(self, row):
result = ''
for i in range(int(self.index / row)):
result += '*' * row + '\n'
result += '*' * (self.index % row) + '\n'
return result
k_1 = Kletka(12)
k_2 = Kletka(5)
print(k_1.__add__(k_2))
print(k_1.__sub__(k_2))
print(k_1.__mul__(k_2))
print(k_1.__truediv__(k_2))
print(f'Разбиение клетки k_1:')
print(k_1.make_order(5))
print(f'Разбиение клетки k_2:')
print(k_2.make_order(5)) | UTF-8 | Python | false | false | 1,240 | py | 42 | HW_7.3.py | 39 | 0.5884 | 0.56782 | 0 | 32 | 32.4375 | 150 |
nimra/module_gen | 5,755,256,187,493 | 87dfadf4333b8f87b071340dd312b8b9f524a5d3 | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/Geron17Hands/B_PartI/D_Chapter4/B_GradientDescent/index.py | 89a67bbbe99b4a411b3624f6ee3c03084f11edbc | [] | no_license | https://github.com/nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_BatchGradient.index import BatchGradient as A_BatchGradient
from .B_StochasticGradient.index import StochasticGradient as B_StochasticGradient
from .C_MinibatchGradient.index import MinibatchGradient as C_MinibatchGradient
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Gradient Descent
# Gradient Descent is a very generic optimization algorithm capable of finding optimal
# solutions to a wide range of problems. The general idea of Gradient Descent is to
# tweak parameters iteratively in order to minimize a cost function.
# Suppose you are lost in the mountains in a dense fog; you can only feel the slope of
# the ground below your feet. A good strategy to get to the bottom of the valley quickly
# is to go downhill in the direction of the steepest slope. This is exactly what Gradient
# Descent does: it measures the local gradient of the error function with regard to the
# parameter vector θ, and it goes in the direction of descending gradient. Once the
# gradient is zero, you have reached a minimum!
# Concretely, you start by filling θ with random values (this is called random
# initialization), and then you improve it gradually, taking one baby step at a time, each
# step attempting to decrease the cost function (e.g., the MSE), until the algorithm
# converges to a minimum (see Figure 4-3).
#
# Figure 4-3. Gradient Descent
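#
# A minimal sketch of that loop on a toy 1-D quadratic cost (the cost function, its
# derivative, the starting point, and the learning rate eta below are illustrative
# assumptions, not taken from the book):
#
#     import numpy as np
#
#     def cost(theta):                # J(theta) = (theta - 3)^2, minimum at theta = 3
#         return (theta - 3.0) ** 2
#
#     def gradient(theta):            # dJ/dtheta = 2 * (theta - 3)
#         return 2.0 * (theta - 3.0)
#
#     theta = np.random.randn()       # random initialization
#     eta = 0.1                       # learning rate
#     for step in range(100):
#         theta = theta - eta * gradient(theta)  # step down the descending gradient
#     # theta now sits very close to the global minimum at 3.0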
#
# An important parameter in Gradient Descent is the size of the steps, determined by
# the learning rate hyperparameter. If the learning rate is too small, then the algorithm
# will have to go through many iterations to converge, which will take a long time (see
# Figure 4-4).
#
# Figure 4-4. Learning rate too small
#
# On the other hand, if the learning rate is too high, you might jump across the valley
# and end up on the other side, possibly even higher up than you were before. This
# might make the algorithm diverge, with larger and larger values, failing to find a good
# solution (see Figure 4-5).
#
#
#
#
# Figure 4-5. Learning rate too large
#
# Finally, not all cost functions look like nice regular bowls. There may be holes, ridges,
# plateaus, and all sorts of irregular terrains, making convergence to the minimum very
# difficult. Figure 4-6 shows the two main challenges with Gradient Descent: if the ran‐
# dom initialization starts the algorithm on the left, then it will converge to a local mini‐
# mum, which is not as good as the global minimum. If it starts on the right, then it will
# take a very long time to cross the plateau, and if you stop too early you will never
# reach the global minimum.
#
#
#
#
#
#
#
#
#
# Figure 4-6. Gradient Descent pitfalls
#
# Fortunately, the MSE cost function for a Linear Regression model happens to be a
# convex function, which means that if you pick any two points on the curve, the line
# segment joining them never crosses the curve. This implies that there are no local
# minima, just one global minimum. It is also a continuous function with a slope that
# never changes abruptly.4 These two facts have a great consequence: Gradient Descent
# is guaranteed to approach arbitrarily close the global minimum (if you wait long
# enough and if the learning rate is not too high).
# In fact, the cost function has the shape of a bowl, but it can be an elongated bowl if
# the features have very different scales. Figure 4-7 shows Gradient Descent on a train‐
# ing set where features 1 and 2 have the same scale (on the left), and on a training set
# where feature 1 has much smaller values than feature 2 (on the right).5
#
#
#
#
# Figure 4-7. Gradient Descent with and without feature scaling
#
#
#
# 4 Technically speaking, its derivative is Lipschitz continuous.
# 5 Since feature 1 is smaller, it takes a larger change in θ1 to affect the cost function, which is why the bowl is
# elongated along the θ1 axis.
#
#
#
#
# As you can see, on the left the Gradient Descent algorithm goes straight toward the
# minimum, thereby reaching it quickly, whereas on the right it first goes in a direction
# almost orthogonal to the direction of the global minimum, and it ends with a long
# march down an almost flat valley. It will eventually reach the minimum, but it will
# take a long time.
#
# When using Gradient Descent, you should ensure that all features
# have a similar scale (e.g., using Scikit-Learn’s StandardScaler
# class), or else it will take much longer to converge.
#
#
#
# This diagram also illustrates the fact that training a model means searching for a
# combination of model parameters that minimizes a cost function (over the training
# set). It is a search in the model’s parameter space: the more parameters a model has,
# the more dimensions this space has, and the harder the search is: searching for a nee‐
# dle in a 300-dimensional haystack is much trickier than in three dimensions. Fortu‐
# nately, since the cost function is convex in the case of Linear Regression, the needle is
# simply at the bottom of the bowl.
#
# Batch Gradient Descent
# To implement Gradient Descent, you need to compute the gradient of the cost func‐
# tion with regards to each model parameter θj. In other words, you need to calculate
# how much the cost function will change if you change θj just a little bit. This is called
# a partial derivative. It is like asking “what is the slope of the mountain under my feet
# if I face east?” and then asking the same question facing north (and so on for all other
# dimensions, if you can imagine a universe with more than three dimensions). Equa‐
# tion 4-5 computes the partial derivative of the cost function with regards to parame‐
# ter θj, noted ∂MSE(θ)/∂θj.
#
#
# Equation 4-5. Partial derivatives of the cost function
#
#     ∂MSE(θ)/∂θj = (2/m) Σ_{i=1..m} (θᵀ · x⁽ⁱ⁾ − y⁽ⁱ⁾) x_j⁽ⁱ⁾
#
#
# Instead of computing these gradients individually, you can use Equation 4-6 to com‐
# pute them all in one go. The gradient vector, noted ∇θMSE(θ), contains all the partial
# derivatives of the cost function (one for each model parameter).
#
#
#
#
#
]
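
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Illustrative sketch of Batch Gradient Descent for Linear Regression using
# the gradient from Equation 4-5. Assumes NumPy is available; X is an (m, n)
# matrix whose first column is the bias term, y is a length-m target vector,
# and the function/parameter names are placeholders chosen for this example.
def batch_gradient_descent_sketch(X, y, eta=0.1, n_iterations=1000):
    import numpy as np  # local import so the module itself does not need NumPy
    m, n = X.shape
    theta = np.random.randn(n)  # random initialization
    for _ in range(n_iterations):
        # Equation 4-5 evaluated for every parameter at once (gradient vector)
        gradients = (2 / m) * X.T.dot(X.dot(theta) - y)
        theta = theta - eta * gradients  # step in the direction of descent
    return theta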
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Gradient Descent",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class GradientDescent(HierNode):
def __init__(self):
super().__init__("Gradient Descent")
self.add(Content(), "content")
self.add(A_BatchGradient())
self.add(B_StochasticGradient())
self.add(C_MinibatchGradient())
# eof
| UTF-8 | Python | false | false | 8,250 | py | 2,642 | index.py | 1,350 | 0.67322 | 0.666382 | 0 | 191 | 41.874346 | 122 |
levent-coban/flaskformexamples | 13,434,657,734,627 | 67383546003de4f9957c0e3913dd135dd09bae8c | 6b293f11f65a62de082a8d1cb4123245ed6257a6 | /FORM-GET-EXAMPLE-1/app.py | c37e0623ddd445b99029638b859d05cbb4140ae6 | [] | no_license | https://github.com/levent-coban/flaskformexamples | 2bb0417c4008e622a1aa99baa500d9e5fc6d3942 | a2868e245f61889bd254b20d95a5f52da3c8a87e | refs/heads/main | 2023-04-01T07:41:19.913785 | 2021-03-31T22:48:22 | 2021-03-31T22:48:22 | 353,510,652 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
if request.args:
        # request.args: the key/value pairs in the URL query string,
        # e.g. /?firstname=Ada&lastname=Lovelace
        # .get() returns None instead of raising KeyError for a missing key
        lst = {
            'first_name': request.args.get('firstname'),
            'last_name': request.args.get('lastname')
        }
return render_template('index.html', list=lst)
return render_template('index.html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=80, debug=True)
| UTF-8 | Python | false | false | 588 | py | 4 | app.py | 3 | 0.581633 | 0.568027 | 0 | 26 | 21.615385 | 67 |
TheJoebus666/Team7Robotics | 2,576,980,424,099 | 982c129269fd60bd945a64bad1fb3a9276880179 | b91a810bf1dde97aa99a39ca95caf5f55f385a54 | /dqn_environment.py | 2b4ece28d9636275261d67b671978648eb3e0d48 | [] | no_license | https://github.com/TheJoebus666/Team7Robotics | 8ea428754c930bced2ef27970d8a53fa2f6e0f4f | c852e9d57f46d77cd4205d208b7b56a3472472fd | refs/heads/master | 2023-05-06T15:30:16.039172 | 2021-04-12T14:18:41 | 2021-04-12T14:18:41 | 352,585,836 | 1 | 1 | null | false | 2021-04-12T14:06:49 | 2021-03-29T09:25:25 | 2021-04-08T14:44:10 | 2021-04-12T14:06:48 | 23,183 | 1 | 1 | 0 | Python | false | false | #!/usr/bin/env python3
#
# Copyright 2019 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Ryan Shim, Gilbert
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile, qos_profile_sensor_data
from turtlebot3_msgs.srv import Dqn
from my_msgs.srv import Goal
from std_srvs.srv import Empty
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
import numpy as np
import math
class RLEnvironment(Node):
def __init__(self):
super().__init__('rl_environment')
self.train_mode = True
self.goal_pose_x = 0.0
self.goal_pose_y = 0.0
self.robot_pose_x = 0.0
self.robot_pose_y = 0.0
self.action_size = 5
self.time_out = 1000000 # maximum number of actions in each episode
self.done = False
self.fail = False
self.succeed = False
# parameters to calculate the reward
self.goal_angle = 0.0
self.goal_distance = 1.0
self.init_goal_distance = 0.1
self.scan_ranges = []
self.min_obstacle_distance = 10.0
self.local_step = 0
self.stop_cmd_vel_timer = None
self.angular_vel = [1.0, 0.5, 0.0, -0.5, -1.0]
qos = QoSProfile(depth=10)
# Initialize publisher
self.cmd_vel_pub = self.create_publisher(Twist, 'cmd_vel', qos)
# Initialize subscribers
self.odom_sub = self.create_subscription(Odometry, 'odom', self.odom_sub_callback, qos)
self.scan_sub = self.create_subscription(LaserScan, '/turtlebot3_laserscan/out', self.scan_sub_callback, qos_profile_sensor_data)
# Initialize client
self.clients_callback_group = MutuallyExclusiveCallbackGroup()
self.task_succeed_client = self.create_client(Goal, 'task_succeed', callback_group=self.clients_callback_group)
self.task_failed_client = self.create_client(Goal, 'task_failed', callback_group=self.clients_callback_group)
self.initialize_environment_client = self.create_client(Goal, 'initialize_env',callback_group=self.clients_callback_group)
# Initialize service
self.rl_agent_interface_service = self.create_service(Dqn, 'rl_agent_interface',self.rl_agent_interface_callback)
self.make_environment_service = self.create_service(Empty, 'make_environment', self.make_environment_callback)
self.reset_environment_service = self.create_service(Dqn, 'reset_environment', self.reset_environment_callback)
def make_environment_callback(self, request, response):
while not self.initialize_environment_client.wait_for_service(timeout_sec=1.0):
print('environment service ...')
future = self.initialize_environment_client.call_async(Goal.Request())
rclpy.spin_until_future_complete(self, future)
response_future = future.result()
if not response_future.success:
print('initialize environment request failed')
else:
self.goal_pose_x = response_future.pose_x
self.goal_pose_y = response_future.pose_y
print('goal ', self.goal_pose_x, ', ', self.goal_pose_y)
return response
def reset_environment_callback(self, request, response):
        # Dqn response: the robot state (lidar rays + the robot's distance and
        # heading angle to the goal)
response.state = self.calculate_state()
return response
def call_task_succeed(self):
"""
When the task is succeed (by reaching the goal) this client will send a request to the gazebo_interface service
the client waits until gets back the response (goal position) form service
:return:
"""
while not self.task_succeed_client.wait_for_service(timeout_sec=1.0):
self.get_logger().warn('service for task succeed is not available, waiting ...')
future = self.task_succeed_client.call_async(Goal.Request())
rclpy.spin_until_future_complete(self, future)
if future.result() is not None:
response = future.result()
self.goal_pose_x = response.pose_x
self.goal_pose_y = response.pose_y
self.get_logger().info('service for task succeed finished')
else:
self.get_logger().error('task succeed service call failed')
def call_task_failed(self):
"""
When the task is failed (either collision or timeout) this client will send a request to the gazebo_interface service
the client waits until gets back the response (goal position) form service
:return:
"""
while not self.task_failed_client.wait_for_service(timeout_sec=1.0):
self.get_logger().warn('service for task failed is not available, waiting ...')
future = self.task_failed_client.call_async(Goal.Request())
rclpy.spin_until_future_complete(self, future)
if future.result() is not None:
response = future.result()
self.goal_pose_x = response.pose_x
self.goal_pose_y = response.pose_y
self.get_logger().info('service for task failed finished')
else:
self.get_logger().error('task failed service call failed')
def scan_sub_callback(self, scan):
self.scan_ranges = [] # clear the list
num_of_lidar_rays = len(scan.ranges)
for i in range(num_of_lidar_rays):
if scan.ranges[i] == float('Inf'):
self.scan_ranges.append(3.5)
elif np.isnan(scan.ranges[i]):
self.scan_ranges.append(0)
else:
self.scan_ranges.append(scan.ranges[i])
self.min_obstacle_distance = min(self.scan_ranges)
def odom_sub_callback(self, msg):
self.robot_pose_x = msg.pose.pose.position.x
self.robot_pose_y = msg.pose.pose.position.y
_, _, self.robot_pose_theta = self.euler_from_quaternion(msg.pose.pose.orientation)
goal_distance = math.sqrt(
(self.goal_pose_x - self.robot_pose_x) ** 2
+ (self.goal_pose_y - self.robot_pose_y) ** 2)
path_theta = math.atan2(
self.goal_pose_y - self.robot_pose_y,
self.goal_pose_x - self.robot_pose_x)
goal_angle = path_theta - self.robot_pose_theta
if goal_angle > math.pi:
goal_angle -= 2 * math.pi
elif goal_angle < -math.pi:
goal_angle += 2 * math.pi
self.goal_distance = goal_distance
self.goal_angle = goal_angle
def calculate_state(self):
"""
calculates the robot state (lidar rays , distance to the goal ,robots heading angle toward the goal)
Checks the task succeed and the task failed
:return:
"""
state = list()
# state.append(float(self.goal_pose_x))
# state.append(float(self.goal_pose_y))
state.append(float(self.goal_distance))
state.append(float(self.goal_angle))
for var in self.scan_ranges:
state.append(float(var))
#state.append(float(0.0))
self.local_step += 1
# Succeed
if self.goal_distance < 0.20: # unit: m
self.get_logger().info("Goal Reached")
self.succeed = True
self.done = True
self.cmd_vel_pub.publish(Twist()) # robot stop
self.local_step = 0
self.call_task_succeed()
self.init_goal_distance = math.sqrt(
(self.goal_pose_x - self.robot_pose_x) ** 2
+ (self.goal_pose_y - self.robot_pose_y) ** 2)
# Fail
if self.min_obstacle_distance < 0.25: # unit: m
self.get_logger().info("Collision happened")
self.fail = True
self.done = True
self.cmd_vel_pub.publish(Twist()) # robot stop
self.local_step = 0
self.call_task_failed()
if self.local_step == self.time_out:
self.get_logger().info("Time out!")
self.done = True
self.local_step = 0
self.call_task_failed()
return state
def calculate_reward(self, action):
"""
calculates the reward accumulating by agent after doing each action, feel free to change the reward function
:return:
"""
if self.train_mode:
yaw_reward = 1 - 2 * math.sqrt(math.fabs(self.goal_angle / math.pi))
distance_reward = (2 * self.init_goal_distance) / (self.init_goal_distance + self.goal_distance) - 1
obstacle_reward = 0.0
if self.min_obstacle_distance < 0.50:
obstacle_reward = -5.0 # self.min_obstacle_distance - 0.45
# reward = self.action_reward[action] + (0.1 * (2-self.goal_distance)) + obstacle_reward
reward = distance_reward + obstacle_reward + yaw_reward
# + for succeed, - for fail
if self.succeed:
print("succeed")
reward = 200.0
elif self.fail:
print("fail")
reward = -150.0
else:
if self.succeed:
reward = 5.0
elif self.fail:
reward = -5.0
else:
reward = 0.0
        self.get_logger().info('reward: %f ' % reward)
        if self.train_mode:
            # yaw_reward is only defined in training mode
            self.get_logger().info('yaw reward: %f ' % yaw_reward)
return reward
def rl_agent_interface_callback(self, request, response):
"""
gives service to the rl_agent. The rl_agent sends an action as a request and this methods has to does the action
and gets back the state, reward and done as a response
:param request: a DQN request including action
:param response: a DQN response including state, reward, and done
:return:
"""
action = request.action
twist = Twist()
# robot always receives a (constant linear velocity + a variable angular velocity)
twist.linear.x = 0.15
twist.angular.z = self.angular_vel[action]
self.cmd_vel_pub.publish(twist)
if self.stop_cmd_vel_timer is None:
self.stop_cmd_vel_timer = self.create_timer(1.8, self.timer_callback)
else:
self.destroy_timer(self.stop_cmd_vel_timer)
self.stop_cmd_vel_timer = self.create_timer(1.8, self.timer_callback)
response.state = self.calculate_state()
response.reward = self.calculate_reward(action)
response.done = self.done
if self.done is True:
self.done = False
self.succeed = False
self.fail = False
return response
def timer_callback(self):
"""
after each self.stop_cmd_vel_timer seconds, this method will be called to send a stop cmd_vel to the robot
:return:
"""
self.get_logger().info('Stop called')
self.cmd_vel_pub.publish(Twist())
self.destroy_timer(self.stop_cmd_vel_timer)
def euler_from_quaternion(self, quat):
"""
Converts quaternion (w in last place) to euler roll, pitch, yaw
:param quat: [x, y, z, w]
:return:
"""
x = quat.x
y = quat.y
z = quat.z
w = quat.w
sinr_cosp = 2 * (w * x + y * z)
cosr_cosp = 1 - 2 * (x * x + y * y)
roll = np.arctan2(sinr_cosp, cosr_cosp)
sinp = 2 * (w * y - z * x)
pitch = np.arcsin(sinp)
siny_cosp = 2 * (w * z + x * y)
cosy_cosp = 1 - 2 * (y * y + z * z)
yaw = np.arctan2(siny_cosp, cosy_cosp)
return roll, pitch, yaw
def main(args=None):
rclpy.init(args=args)
rl_environment = RLEnvironment()
while True:
rclpy.spin_once(rl_environment)
rclpy.shutdown()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 12,444 | py | 14 | dqn_environment.py | 4 | 0.604147 | 0.594182 | 0 | 337 | 35.925816 | 137 |
IT-corridor/Round-Up | 1,434,519,091,515 | aeae04643062de034ad4f7a5186ad59914cc2c06 | d4831377686b3ff25446af89e2eb0e9e38e68dcb | /main_app/tasks.py | c22655d05ad7a4fd27283f5cc06fb49f2a69d373 | [] | no_license | https://github.com/IT-corridor/Round-Up | 4896484e80cfea4db42a5a7002ccfbb72e03129b | ad757ce1bdedf35b1bdd95096c02fdfed180d4aa | refs/heads/master | 2020-04-03T20:00:04.606916 | 2018-10-31T00:35:05 | 2018-10-31T00:35:05 | 155,543,300 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from decimal import Decimal
import logging
import math
from celery import shared_task
from celery.schedules import crontab
from celery.task import periodic_task
from dateutil import parser
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMessage
from django.db import transaction, IntegrityError, connection
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils import timezone
from djmoney.money import Money
from pinax.stripe.actions import subscriptions
from pinax.stripe.actions.customers import get_customer_for_user
from pinax.stripe.actions.sources import delete_card
from pinax.stripe.models import Card
import pytz
from stripe import StripeError
from main_app import models
from main_app.helpers.external_helpers.shopify_webhook_controller import get_return_fields
from main_app.helpers.helpers import create_or_assert_round_up_product
from main_app.models import AuthAppShopUser, RoundUpOrders
from source.lib import shopify
from source.lib.pyactiveresource.connection import UnauthorizedAccess, ClientError
from source.lib.shopify.resources.order import Order
from source.lib.shopify.resources.product import Product
from source.lib import shopify
from source.lib.shopify.resources.webhook import Webhook
@periodic_task(run_every=(crontab(minute=0, hour=0)), name="main_app.tasks.sync_store_orders", ignore_result=True)
def sync_store_orders(specific_store=None):
user_list = []
if not specific_store:
user_list = AuthAppShopUser.objects.filter().exclude(token='00000000000000000000000000000000')
if specific_store:
user_list = [specific_store]
for user in user_list:
try:
if not user.userprofile.setup_required:
with shopify.Session.temp(user.myshopify_domain, user.token):
# Track sync times from DB (in UTC)
try:
current_max_created_UTC = user.userprofile.latest_order_sync_time
except ObjectDoesNotExist:
logging.error("USER-{0}-GET-ORDERS-NO-PROFILE".format(str(user)))
continue
new_max_created_UTC = timezone.now()
try:
round_up_variant_id = user.userprofile.round_up_variant_id
if not round_up_variant_id:
raise ValueError
except ValueError:
logging.error("USER-{0}-GET-ORDERS-NO-ROUND-UP-PRODUCT".format(str(user)))
continue
# Create an empty list to hold incoming orders
new_order_list = []
# Set query options for Shopify find orders
if current_max_created_UTC:
# Convert start and end times into store's local timezone.
created_at_min = current_max_created_UTC.astimezone(
pytz.timezone(user.userprofile.iana_timezone))
elif not current_max_created_UTC:
# Only look back as far as the user has existed.
created_at_min = user.userprofile.created.astimezone(
pytz.timezone(user.userprofile.iana_timezone))
created_at_max = new_max_created_UTC.astimezone(
pytz.timezone(user.userprofile.iana_timezone))
order_args = {'status': "any",
'created_at_min': created_at_min,
'created_at_max': created_at_max}
# Iterate through Shopify orders for store
orders_count = Order.count(**order_args)
limit_per_page = 50
pages = math.ceil(orders_count / limit_per_page)
# Iterate through all API Pages.
for i in range(1, int(pages) + 2):
orders = Order.find(limit=limit_per_page, page=i, **order_args)
# Iterate through the current page of Shopify orders to find line items that contain round up products.
for order in orders:
round_up_line_item_id = None
new_order = None
if not order.cancel_reason and order.financial_status in ('paid', 'partially_refunded',
'partially_paid', 'refunded'):
for item in order.line_items:
if item.variant_id == round_up_variant_id:
# Create a Round Up Order Product
new_order = RoundUpOrders(store=user,
order_number=order.id,
order_name=order.name,
order_total=Money(Decimal(order.total_price), order.currency),
order_roundup_total=Money((Decimal(item.price) * item.quantity) - Decimal(item.total_discount), order.currency),
shopify_created_at=parser.parse(order.created_at).astimezone(pytz.utc)
)
new_order_list.append(new_order)
round_up_line_item_id = item.id
break
if round_up_line_item_id and new_order:
# Check if the round up item is refunded for this order.
for refund in order.refunds:
for refund_line_item in refund.refund_line_items:
if refund_line_item.line_item_id == round_up_line_item_id:
new_order.order_roundup_total = new_order.order_roundup_total - Money((refund_line_item.quantity * settings.ROUND_UP_DEFAULT_PRICE), order.currency)
new_order.shopify_payment_status = RoundUpOrders.SHOPIFY_PAYMENT_STATUS.PARTIALLY_REFUNDED
if new_order.order_roundup_total <= Money(Decimal(0), order.currency):
new_order.order_roundup_total = Money(Decimal(0), order.currency)
new_order.shopify_payment_status = \
RoundUpOrders.SHOPIFY_PAYMENT_STATUS.REFUNDED
try:
with transaction.atomic():
if new_order_list:
RoundUpOrders.objects.bulk_create(new_order_list)
if new_max_created_UTC:
user.userprofile.latest_order_sync_time = new_max_created_UTC
user.userprofile.save()
except IntegrityError:
# Try to process orders individually
for new_order in new_order_list:
try:
new_order.save()
except IntegrityError:
pass
if new_max_created_UTC:
user.userprofile.latest_order_sync_time = new_max_created_UTC
user.userprofile.save()
except Exception as e:
logging.error(e.message)
pass
@shared_task
def check_user_onboarding_progress(domain):
try:
store = models.AuthAppShopUser.objects.get(myshopify_domain=domain)
except ObjectDoesNotExist:
logging.warning("Onboarding-Check-Task-No-User: " + str(domain))
return
if not store.userprofile.onboarding_email_sent:
        required_steps_string = ''
        if store.userprofile.setup_required:
            # User has to do all setup tasks
            required_steps_string += '- Please complete the Round Up App setup wizard by accessing the app from your Shopify admin.\n'
        # Does the customer have a Stripe customer record?
        stripe_customer = get_customer_for_user(store)
        # Get all of the current customer's/store's payment methods
        cards = Card.objects.filter(customer=stripe_customer).order_by("created_at")
        if not stripe_customer or not cards:
            required_steps_string += '- Please add your payment information to the Round Up App payment settings (Required to make donations).\n'
        # Does the customer have a charity selected?
        try:
            if not store.store_charity.selected_charity:
                required_steps_string += '- Please select a Charity in the Round Up app.\n'
        except ObjectDoesNotExist:
            required_steps_string += '- Please select a Charity in the Round Up app.\n'
        # Has the customer confirmed that they added the setup code snippet?
        try:
            if not store.userprofile.install_user_verified:
                required_steps_string += '- Please ensure that you have modified your Cart page theme to include the Round Up app code snippet.\n'
        except ObjectDoesNotExist:
            required_steps_string += '- Please ensure that you have modified your Cart page theme to include the Round Up app code snippet.\n'
if required_steps_string != '':
# Send an email
ctx = {
"myshopify_domain": store.myshopify_domain,
"required_steps_string": required_steps_string
}
subject = render_to_string("main_app/email/required_steps_subject.txt", ctx)
subject = subject.strip()
message = render_to_string("main_app/email/required_steps_body.txt", ctx)
email = store.userprofile.shop_contact_email
num_sent = EmailMessage(
subject,
message,
to=[email],
from_email=settings.PINAX_STRIPE_INVOICE_FROM_EMAIL
).send()
store.userprofile.onboarding_email_sent = True
store.userprofile.save()
return
else:
return
@shared_task
def ask_for_review(domain):
try:
store = models.AuthAppShopUser.objects.get(myshopify_domain=domain)
except ObjectDoesNotExist:
logging.warning("Review-Check-Task-No-User: " + str(domain))
return
if not store.token or store.token == '00000000000000000000000000000000':
return
if not store.userprofile.review_email_sent:
        # Does the customer have a Stripe customer record?
stripe_customer = get_customer_for_user(store)
# Get all the current customers/stores payment methods
cards = Card.objects.filter(customer=stripe_customer).order_by("created_at")
if store.userprofile.setup_required == False and stripe_customer and cards:
# Send an email
ctx = {
"myshopify_domain": store.myshopify_domain,
}
subject = render_to_string("main_app/email/review_subject.txt", ctx)
subject = subject.strip()
message = render_to_string("main_app/email/review_body.txt", ctx)
email = store.userprofile.shop_contact_email
num_sent = EmailMessage(
subject,
message,
to=[email],
from_email=settings.PINAX_STRIPE_INVOICE_FROM_EMAIL
).send()
store.userprofile.review_email_sent = True
store.userprofile.save()
return
else:
return
@shared_task
def app_uninstall_task(data, **kwargs):
try:
user = models.AuthAppShopUser.objects.get(myshopify_domain=kwargs['domain'])
user.token = '00000000000000000000000000000000'
user.save()
# Cancel any Stripe subscriptions
try:
stripe_customer = get_customer_for_user(user)
if subscriptions.has_active_subscription(stripe_customer):
user_subscriptions = models.Subscription.objects.filter(
customer=stripe_customer
).filter(
Q(ended_at__isnull=True) | Q(ended_at__gt=timezone.now())
)
for subscription in user_subscriptions:
subscriptions.cancel(subscription, at_period_end=False)
# Clear subscription reason
models.StripeCustomerSubReason.objects.update_or_create(
store=user, defaults={"subscription": None, 'reason': None}
)
# Clear stripe cards
user_cards = Card.objects.filter(customer=stripe_customer).order_by("created_at")
for card in user_cards:
delete_card(stripe_customer, card.stripe_id)
except StripeError as e:
logging.error(str(e.message))
# Send an email to the user to welcome them
try:
ctx = {
"myshopify_domain": user.myshopify_domain,
}
subject = render_to_string("main_app/email/uninstall_subject.txt", ctx)
subject = subject.strip()
message = render_to_string("main_app/email/uninstall_body.txt", ctx)
email = user.userprofile.shop_contact_email
num_sent = EmailMessage(
subject,
message,
to=[email],
from_email=settings.PINAX_STRIPE_INVOICE_FROM_EMAIL
).send()
except Exception:
pass
# Invalidate any existing user sessions.
user.clear_user_sessions()
connection.close()
except ObjectDoesNotExist:
if kwargs['domain']:
logging.warning("App-Uninstall-Webhook-No-User-Found: " + str(kwargs['domain']))
return
except Exception as e:
logging.error("App-Uninstall-Webhook-Unknown-Exception: "+str(e.message))
raise e
@shared_task
def update_shop_task(data, **kwargs):
try:
user = models.AuthAppShopUser.objects.get(myshopify_domain=kwargs['domain'])
except ObjectDoesNotExist:
logging.warning("Shop-Update-Webhook-No-User-Found: " + str(kwargs['domain']))
connection.close()
return
try:
change_made = False
if user.userprofile.iana_timezone != data['iana_timezone']:
user.userprofile.iana_timezone = data['iana_timezone']
change_made = True
if user.userprofile.display_timezone != data['timezone']:
user.userprofile.display_timezone = data['timezone']
change_made = True
if user.userprofile.name != data['name']:
user.userprofile.name = data['name']
change_made = True
if user.userprofile.shop_contact_email != data['email']:
user.userprofile.shop_contact_email = data['email']
change_made = True
if change_made:
user.userprofile.save()
connection.close()
except Exception as e:
logging.error("Shop-Update-Webhook-Unknown-Exception: "+str(e.message))
raise e
@shared_task
def product_delete_task(data, **kwargs):
try:
store = models.AuthAppShopUser.objects.get(myshopify_domain=kwargs['domain'])
except ObjectDoesNotExist:
logging.warning("Product-Delete-Webhook-No-User-Found: " + str(kwargs['domain']))
return
try:
        # Check if the deleted product is the store's round up product
if data['id'] == store.userprofile.round_up_product_id:
# If so, then restore it by creating a new round up product
create_or_assert_round_up_product(store, deleted=True)
connection.close()
except Exception as e:
logging.error("Product-Delete-Webhook-Unknown-Exception: "+str(e.message))
raise e
@shared_task
def product_update_task(data, **kwargs):
try:
store = models.AuthAppShopUser.objects.get(myshopify_domain=kwargs['domain'])
except ObjectDoesNotExist:
logging.warning("Product-Update-Webhook-No-User-Found: " + str(kwargs['domain']))
return
try:
if data['id'] == store.userprofile.round_up_product_id:
with shopify.Session.temp(store.myshopify_domain, store.token):
# Compare data to the expected values.
discrepancy = False
if len(data['variants']) != 1:
discrepancy = True
try:
if data['variants'][0]['price'] != "0.01":
discrepancy = True
if data['variants'][0]['inventory_management'] != None:
discrepancy = True
if data['variants'][0]['taxable'] != False:
discrepancy = True
if data['variants'][0]['requires_shipping'] != False:
discrepancy = True
except KeyError:
discrepancy = True
                # If there are discrepancies, destroy the product, and recreate it.
if discrepancy:
product = Product.find(store.userprofile.round_up_product_id)
product.destroy()
connection.close()
except Exception as e:
logging.error("Product-Update-Webhook-Unknown-Exception: "+str(e.message))
raise e
@shared_task(bind=True)
def internal_debug_task(self):
print(self.request.id)
print('Request: {0!r}'.format(self.request))
# @shared_task
# def task_create_or_update_webhooks(full_url):
# """
# Purpose: This function will create, or ensure that they are created all required application
# webhooks (shop update, product update/delete, and app uninstall.
# :param full_url: get the POST url for webhook created from the shopify_webhook module
# :param user: the authenticated and verified user to create a webhook for
# :param webhook_topic: what webhook to register
# :return: true on success, false on fail
# """
#
# user_list = AuthAppShopUser.objects.filter().exclude(token='00000000000000000000000000000000')
#
# for user in user_list:
#
# # if not user.userprofile.round_up_product_id or not user.userprofile.round_up_js_script_id or not user.userprofile.round_up_variant_id:
#
# # user = AuthAppShopUser.objects.get(myshopify_domain='the-brave-collection.myshopify.com')
# try:
# with shopify.Session.temp(user.myshopify_domain, user.token):
# required_webhook_topics = ["app/uninstalled",
# "shop/update",
# "products/delete",
# "products/update"
# ]
#
# # Check to see if the required webhooks exist for the current Shopify shop.
# shop_webhooks = Webhook.find()
#
# for required_webhook in required_webhook_topics:
# webhook_found_and_accurate = False
#
# for shopify_webhook in shop_webhooks:
#
# expected_fields = get_return_fields(shopify_webhook.topic)
#
# # Do the required webhooks exist?
# if required_webhook == shopify_webhook.topic:
# if shopify_webhook.format == "json" and shopify_webhook.address == full_url and \
# shopify_webhook.fields == expected_fields:
# webhook_found_and_accurate = True
# break
# else:
# shopify_webhook.address = full_url
# shopify_webhook.format = "json"
# if expected_fields:
# shopify_webhook.fields = expected_fields
# shopify_webhook.save()
# webhook_found_and_accurate = True
# break
#
# if not webhook_found_and_accurate:
# # If a webhook does not exist, create it.
# new_webhook = Webhook()
# new_webhook.topic = required_webhook
# new_webhook.address = full_url
# new_webhook.format = "json"
# expected_fields = get_return_fields(required_webhook)
# if expected_fields:
# new_webhook.fields = expected_fields
# new_webhook.save()
#
# except (UnauthorizedAccess, ClientError):
# user.token = '00000000000000000000000000000000'
# user.save()
# continue
def manual_order_sync():
user_list = []
specific_store = AuthAppShopUser.objects.get(id=23)
user_list = [specific_store]
for user in user_list:
try:
if not user.userprofile.setup_required:
with shopify.Session.temp(user.myshopify_domain, user.token):
# Track sync times from DB (in UTC)
try:
current_max_created_UTC = user.userprofile.latest_order_sync_time
print("Current UTC Sync time: " + str(current_max_created_UTC))
except ObjectDoesNotExist:
logging.error("USER-{0}-GET-ORDERS-NO-PROFILE".format(str(user)))
continue
new_max_created_UTC = timezone.now()
try:
round_up_variant_id = user.userprofile.round_up_variant_id
print("Round up variant ID: " + str(round_up_variant_id))
if not round_up_variant_id:
raise ValueError
except ValueError:
logging.error("USER-{0}-GET-ORDERS-NO-ROUND-UP-PRODUCT".format(str(user)))
continue
# Create an empty list to hold incoming orders
new_order_list = []
# Set query options for Shopify find orders
if current_max_created_UTC:
# Convert start and end times into store's local timezone.
created_at_min = current_max_created_UTC.astimezone(
pytz.timezone(user.userprofile.iana_timezone))
elif not current_max_created_UTC:
# Only look back as far as the user has existed.
created_at_min = user.userprofile.created.astimezone(
pytz.timezone(user.userprofile.iana_timezone))
created_at_max = new_max_created_UTC.astimezone(
pytz.timezone(user.userprofile.iana_timezone))
order_args = {'status': "any",
'created_at_min': created_at_min,
'created_at_max': created_at_max}
# Iterate through Shopify orders for store
orders_count = Order.count(**order_args)
limit_per_page = 50
pages = math.ceil(orders_count / limit_per_page)
# Iterate through all API Pages.
for i in range(1, int(pages) + 2):
orders = Order.find(limit=limit_per_page, page=i, **order_args)
# Iterate through the current page of Shopify orders to find line items that contain round up products.
for order in orders:
round_up_line_item_id = None
new_order = None
if not order.cancel_reason and order.financial_status in ('paid', 'partially_refunded',
'partially_paid', 'refunded'):
for item in order.line_items:
if item.variant_id == round_up_variant_id:
# Create a Round Up Order Product
new_order = RoundUpOrders(store=user,
order_number=order.id,
order_name=order.name,
order_total=Money(Decimal(order.total_price), order.currency),
order_roundup_total=Money((Decimal(item.price) * item.quantity) - Decimal(item.total_discount), order.currency),
shopify_created_at=parser.parse(order.created_at).astimezone(pytz.utc)
)
new_order_list.append(new_order)
round_up_line_item_id = item.id
break
if round_up_line_item_id and new_order:
# Check if the round up item is refunded for this order.
for refund in order.refunds:
for refund_line_item in refund.refund_line_items:
if refund_line_item.line_item_id == round_up_line_item_id:
new_order.order_roundup_total = new_order.order_roundup_total - Money((refund_line_item.quantity * settings.ROUND_UP_DEFAULT_PRICE), order.currency)
new_order.shopify_payment_status = RoundUpOrders.SHOPIFY_PAYMENT_STATUS.PARTIALLY_REFUNDED
if new_order.order_roundup_total <= Money(Decimal(0), order.currency):
new_order.order_roundup_total = Money(Decimal(0), order.currency)
new_order.shopify_payment_status = \
RoundUpOrders.SHOPIFY_PAYMENT_STATUS.REFUNDED
try:
print("Bulk create new order list: " + str(new_order_list))
with transaction.atomic():
if new_order_list:
RoundUpOrders.objects.bulk_create(new_order_list)
if new_max_created_UTC:
user.userprofile.latest_order_sync_time = new_max_created_UTC
user.userprofile.save()
except IntegrityError:
print("There was an integrity error")
print("Count of new records: " + str(len(new_order_list)))
# Try to process orders individually
for new_order in new_order_list:
try:
new_order.save()
print("SAVED: Order {0}".format(str(new_order.order_number)))
except IntegrityError:
print("Order {0} has an integrity error".format(str(new_order.order_number)))
pass
if new_max_created_UTC:
print("Saving the sync date now anyways")
user.userprofile.latest_order_sync_time = new_max_created_UTC
user.userprofile.save()
except Exception as e:
print("There was a generic error: " + str(e.message))
logging.error(e.message)
pass | UTF-8 | Python | false | false | 28,946 | py | 121 | tasks.py | 69 | 0.520763 | 0.514164 | 0 | 660 | 42.859091 | 196 |
Mrpool96/Python-2020 | 1,924,145,391,679 | d85f951fad966588ad2b99eac0277f3923754a43 | aa534dd11a258dca3b0ab6c0e49355891b046d90 | /Piechart.py | 7daf9e9568a8c2fe9b9a7ce0b8cb93a1b5af6bf4 | [] | no_license | https://github.com/Mrpool96/Python-2020 | b0650171b6d28b1ba7769158d8875dfe9a543d12 | 122b2fc12c1fcd862d53f94f9e30e541640fdc7d | refs/heads/master | 2023-01-03T21:30:35.306675 | 2020-11-01T09:26:16 | 2020-11-01T09:26:16 | 288,363,064 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import numpy as np
# Slice sizes, labels, and per-slice offsets ('explode' pulls a slice outward)
y = np.array([35, 25, 25, 10, 15])
mylabels = ["one", "two", "three", "four", "five"]
myexplode = [0.2, 0, 0, 0, 0.3]

# Start the first slice at 90 degrees; draw with a legend and a drop shadow
plt.pie(y, labels=mylabels, startangle=90, explode=myexplode, shadow=True)
plt.legend()
plt.show() | UTF-8 | Python | false | false | 263 | py | 23 | Piechart.py | 22 | 0.676806 | 0.604563 | 0 | 11 | 23 | 79 |
deemx/myblog | 17,798,344,503,983 | 9b6c0b9b915591c439cdcd55c394fe9b0ff30533 | 748ca0c5dad210a57f8966a20c9976f0e2f0faac | /app/models.py | 69d2c0a6862c59af8a58f1e4ce8238749f287f38 | [] | no_license | https://github.com/deemx/myblog | 7fb6337c6cf184518dfc41a7c931daedb3a616b3 | a03c13af8771c3292300842360985101280f37e0 | refs/heads/master | 2016-09-06T02:57:14.687755 | 2015-09-07T12:06:41 | 2015-09-07T12:06:41 | 40,006,167 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from captcha.fields import CaptchaField
from ckeditor.fields import RichTextField
class Tag(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return '{0}'.format(self.name)
class Post(models.Model):
title = models.CharField(max_length=255)
content = RichTextField()
datestamp = models.DateTimeField()
tags = models.ManyToManyField(Tag)
class Meta:
ordering = ['-id']
def __str__(self):
return '{0}'.format(self.title)
def get_absolute_url(self):
return '/{0}/'.format(self.id)
class Comments(models.Model):
nickname = models.CharField(max_length=35)
comment = models.TextField(default='')
    # NOTE: CaptchaField is a form field; as a plain class attribute here it is
    # not stored in the database (captcha validation belongs on a ModelForm)
    captcha = CaptchaField()
date = models.DateTimeField(auto_now_add=True)
post = models.ForeignKey(Post)
| UTF-8 | Python | false | false | 844 | py | 10 | models.py | 7 | 0.667062 | 0.654028 | 0 | 35 | 23.114286 | 50 |
stefan-cross/choraoke | 3,307,124,850,911 | 83e9d9fe26878f03b11511dce3abaf125ca04794 | 6fd59b0c5ccc0240c0cbc15b04ef4eaeb9d8b44b | /backend/ultimate-api/server/tab.py | 9d3e5ecf6d558f0719cc5a8806f166eb19bd4778 | [] | no_license | https://github.com/stefan-cross/choraoke | dcd45ff2ee33623be33051a409736b8a4308b0ae | fe206876d5c2dd8078ae6a9c1d97e55bc9f34aa4 | refs/heads/main | 2023-01-20T14:10:11.431567 | 2020-11-30T08:29:57 | 2020-11-30T08:29:57 | 316,743,439 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import Any
# tab {
# title: "tab name",
# artist_name: "",
# author: "",
# capo: "" (can be null),
# Tuning: "" (can be null),
#
# lines: [
# {
# type: "chord" (OR "lyrics", "blank"),
# chords: [
# {
# note: "G",
# pre_spaces: 10
# },
# {
# note: "Em",
# pre_spaces: 8
# }
# ]
# },
# {
# type: "lyrics",
# lyrics: "I found a love for me"
# },
# {
# type: "blank"
# }
# ]
# }
class UltimateTabInfo(object):
'''
Represents the info of an ultimate guitar tab. Does not contain any lyrics or chords
'''
def __init__(self, title: str, artist: str, author: str, difficulty: str = None, key: str = None, capo: str = None, tuning: str = None):
self.title = title
self.artist = artist
self.author = author
# Optionals:
self.difficulty = difficulty
self.key = key
self.capo = capo
self.tuning = tuning
class UltimateTab(object):
'''
Represents an ultimate guitar tab containing Lyrics and Chords
A `queue-like` object which will append lines to object
and can be parsed to formatted json.
'''
JSON_CONTAINER_NAME = 'lines'
JSON_KEY_CHORD_ARRAY = 'chords'
JSON_KEY_NOTE = 'note'
JSON_KEY_LYRIC = 'lyric'
JSON_KEY_BLANK = 'blank'
JSON_KEY_TYPE = 'type'
    JSON_KEY_LEAD_SPACES = 'pre_spaces'
def __init__(self):
self.lines = []
def _append_new_line(self, type: str, content_tag: str, content: Any) -> None:
line = {'type': type}
if content_tag is not None:
line[content_tag] = content
self.lines.append(line)
def append_chord_line(self, chords_line: str) -> None:
'''
Appends a chord line to the tab.
Parameters:
        - chords_line: A single-line string containing leading spaces and guitar chords (e.g., G, Em)
'''
chords = [] # Array of dictionary of chords
leading_spaces = 0
for c in chords_line.split(' '):
            if not c: # an empty string from split marks a space character
leading_spaces += 1
else:
chord = {
self.JSON_KEY_NOTE: c,
                    self.JSON_KEY_LEAD_SPACES: leading_spaces
}
chords.append(chord)
leading_spaces = 1 # reset for next chord to read in - resets to 1 to compensate for `split`
self._append_new_line(self.JSON_KEY_CHORD_ARRAY, self.JSON_KEY_CHORD_ARRAY, chords)
def append_lyric_line(self, lyric_line: str) -> None:
'''
Appends a lyric line to the tab.
Parameters:
- lyric_line: A single-line string containing lyrics (and any leading spaces needed)
'''
self._append_new_line(self.JSON_KEY_LYRIC, self.JSON_KEY_LYRIC, lyric_line)
def append_blank_line(self) -> None:
'''
Appends a blank line to the tab.
'''
self._append_new_line(self.JSON_KEY_BLANK, None, None)
def as_json_dictionary(self) -> dict:
'''
Returns a dictionary representation of the tab object.
Properly formatted for use as a json object.
'''
return {self.JSON_CONTAINER_NAME: self.lines}
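
# Minimal usage sketch; the sample chord/lyric values below are hypothetical,
# not taken from a real tab:
if __name__ == '__main__':
    tab = UltimateTab()
    tab.append_chord_line('   G  Em')
    tab.append_lyric_line('I found a love for me')
    tab.append_blank_line()
    print(tab.as_json_dictionary())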
| UTF-8 | Python | false | false | 3,473 | py | 9 | tab.py | 5 | 0.523179 | 0.521163 | 0 | 121 | 27.702479 | 140 |
Mantabit/python_examples | 10,471,130,316,860 | 26097d30c9001583aad5c186060ea6c6fb1f8ebc | ed6dd94781e3022f230050284d2ddd3554cc0772 | /pyqt/basic_gui.py | 75cf9caea72edead569cdbbe8ce763c7c436e06e | [] | no_license | https://github.com/Mantabit/python_examples | 602d4f4237dbc2044d30dc5482e3e2dee4d90fb6 | 516dbb9cc63c7de5bfe7d0e79477dff9ff340a5d | refs/heads/master | 2021-07-04T08:26:38.007606 | 2020-08-17T10:09:04 | 2020-08-17T10:09:04 | 153,170,298 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 12:21:39 2020
@author: dvarx
"""
from PyQt5.QtWidgets import QApplication,QLabel,QWidget,QVBoxLayout,QPushButton,QHBoxLayout,QSlider
from PyQt5.QtCore import Qt
# a single QApplication must exist before any widgets are created
app = QApplication([])

def top_btn_cb():
    # callback for the top button; 'global' reaches the module-level widget
    global topbutton
    topbutton.setText("Clicked Top")

def slider_val_chgd(val):
    # slider callback receives the new slider value
    global sliderlabel
    sliderlabel.setText("Value changed:%-3d" % (val))
mainwindow=QWidget()
layout=QVBoxLayout()
toplayout=QHBoxLayout()
bottomlayout=QHBoxLayout()
topbutton=QPushButton("Top")
toplabel=QLabel("Top-Button: ")
toplayout.addWidget(toplabel)
toplayout.addWidget(topbutton)
bottombutton=QPushButton("Bottom")
bottomlabel=QLabel("Bottom-Button: ")
bottomlayout.addWidget(bottomlabel)
bottomlayout.addWidget(bottombutton)
sliderlabel=QLabel("Current Value:%-3d"%(0))
# horizontal slider over 0..255 with tick marks every 10 units
slider = QSlider(Qt.Horizontal)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(10)
slider.setMinimum(0)
slider.setMaximum(255)
slider.setValue(0)
slider.valueChanged.connect(slider_val_chgd)
sliderlayout=QVBoxLayout()
sliderlayout.addWidget(slider)
sliderlayout.addWidget(sliderlabel)
topbutton.clicked.connect(top_btn_cb)
layout.addLayout(toplayout)
layout.addLayout(bottomlayout)
layout.addLayout(sliderlayout)
mainwindow.setLayout(layout)
mainwindow.show()
app.exec_() | UTF-8 | Python | false | false | 1,318 | py | 43 | basic_gui.py | 37 | 0.789833 | 0.770865 | 0 | 59 | 21.355932 | 99 |
santoshr1016/WeekendMasala | 13,048,110,653,875 | 5de04ce814bb03adea8d9fad50da5a2bb0ba3da6 | b41da6f351f27bf0d45a4e4d0e1be8f3a86f4b64 | /itsybitsy/test_torus.py | 3c045886bf0539e377139576af44a37c1387e230 | [] | no_license | https://github.com/santoshr1016/WeekendMasala | a5adbabe0b78cde567667376d7ddf05bb505a0ff | e099f9ac9677f7acb8faf620af94a06d76cae044 | refs/heads/master | 2020-03-26T00:26:32.649429 | 2019-08-30T07:32:24 | 2019-08-30T07:32:24 | 144,320,624 | 0 | 0 | null | false | 2019-06-03T23:08:00 | 2018-08-10T18:36:38 | 2019-06-03T22:55:20 | 2019-06-03T23:07:59 | 2,764 | 0 | 0 | 0 | Python | false | false | import time
def dp_way(str1, start, end, dp):
    # longest palindromic subsequence, memoized on the (start, end) pair
    # base cases
    if start > end:
        return 0
    if start == end:
        return 1
    # only compute each (start, end) state once
    if dp[start][end] == 0:
        # case 1: matching end characters extend the inner palindrome by 2
        if str1[start] == str1[end]:
            dp[start][end] = 2 + dp_way(str1, start+1, end-1, dp)
        # case 2: otherwise drop one end and keep the better result
        else:
            left = dp_way(str1, start+1, end, dp)
            right = dp_way(str1, start, end-1, dp)
            dp[start][end] = max(left, right)
    return dp[start][end]
def longest_palindrome(str1, start, end):
if start > end:
return 0
if start == end:
return 1
#case 1
if str1[start] == str1[end]:
return 2 + longest_palindrome(str1, start+1, end-1)
#case 2
left = longest_palindrome(str1, start+1, end)
right = longest_palindrome(str1, start, end-1)
return max(left, right)
str1 = "rtyftkhkkayakiopiouuhgioyoyi"
start = 0
end = len(str1) - 1
print("recursive Way")
startt = time.time()
print(longest_palindrome(str1, start, end))
done = time.time()
elapsed = done - startt
print(elapsed)
print("*"*22)
size = len(str1)
dp = [[0 for i in range(size)] for i in range(size)]
start = 0
end = len(str1) - 1
print("DP Way")
startt = time.time()
print(dp_way(str1, start, end, dp))
done = time.time()
elapsed = done - startt
print(elapsed)
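
# The memoized version fills at most O(n^2) (start, end) states, while the
# plain recursion branches exponentially; the timings above make this visible.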
| UTF-8 | Python | false | false | 1,400 | py | 231 | test_torus.py | 205 | 0.581429 | 0.55 | 0 | 65 | 20.538462 | 65 |
valentyntroyan/Homework_PyCharm | 7,619,272,024,174 | 45b5407f10403c3a5ab68d8acff56c11efac2c39 | 3363a24d65383a5a064fa6602bb37df7d075fe31 | /homework_1.py | 20e26e7a051aa5c33ff2f686340f2de49cafd4d3 | [] | no_license | https://github.com/valentyntroyan/Homework_PyCharm | a7e79c9c1633ae7c51d230928824306e2bcee5c2 | af258b696e324fd8c6602a74acab564b1ad4f5ea | refs/heads/master | 2020-11-25T01:55:33.663037 | 2019-12-16T17:49:08 | 2019-12-16T17:49:08 | 228,438,560 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print('Hello world!')
print('Second Commit')
print('Third Commit') | UTF-8 | Python | false | false | 66 | py | 1 | homework_1.py | 1 | 0.727273 | 0.727273 | 0 | 3 | 21.333333 | 22 |
michaelkook/GraphLab | 10,651,518,897,302 | 835ce1ed563c0bdd1ce7b5d850b73b77583393c8 | bac60efbee14e6e7a2b637593e7b5ca08671d212 | /makesnapgraphs.py | 1e0f892d6318e4b62ed19a94a38b981ea4c4bbcf | [] | no_license | https://github.com/michaelkook/GraphLab | 96fb813927c148376bc239f3a228d4f9c233a116 | 466a732ccd429afc47dd83c4b2745220a060c321 | refs/heads/master | 2016-09-16T15:58:14.093239 | 2012-11-28T21:50:58 | 2012-11-28T21:50:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import scipy.io as sio
import sys
from glob import glob
def maketsv(G_dir, toDir):
for gr in glob(os.path.join(G_dir,'*')):
G = sio.loadmat(gr)['fibergraph']
f = open( os.path.join(toDir,getBaseName(gr)+".tsv"),"w")
    # iterate over row indices; G.indices holds CSR column indices, not rows
    for row in range(G.shape[0]):
nnz = G[row,:].nonzero()[1]
for edge in nnz:
f.write(str(row)+" "+ str(edge) + "\n")
f.close()
def makesnap(G_dir, toDir):
for gr in glob(os.path.join(G_dir,'*')):
G = sio.loadmat(gr)['fibergraph']
f = open( os.path.join(toDir,getBaseName(gr)+".snap"),"w")
for row in range(G.shape[0]):
nnz = G[row,:].nonzero()[1]
if nnz.shape[0] == 0:
f.write(str(row) + " 0\n")
      else:
        f.write(str(row))
        for edge in nnz:
          f.write(" "+ str(edge))
        f.write("\n")  # end this row's adjacency line
f.close()
def getBaseName(fn):
if fn.endswith('/'):
fn = fn[:-1]
return (os.path.splitext(fn.split('/')[-1])[0]).partition('_')[0]
if __name__ == '__main__':
makesnap(sys.argv[1], sys.argv[2])
| UTF-8 | Python | false | false | 1,038 | py | 3 | makesnapgraphs.py | 2 | 0.526012 | 0.514451 | 0 | 44 | 22.590909 | 67 |
junyi1997/pi_livenet | 16,810,502,034,550 | be4cce6274ad68fe2a87e4742fa50f94cc078ca8 | a53c2f957f3b85b7f4271a63ca57a70246ac7937 | /GUIdemo.py | 8158efa4849526ed68ee11898d64753f6f9d8ff8 | [] | no_license | https://github.com/junyi1997/pi_livenet | 20e19b5fa6f8b1ac3e2e9bcddee52579b31d6cd2 | 4c92c953eb3e7aae328b3e39c21d254513d69d4d | refs/heads/master | 2023-07-07T12:54:27.872056 | 2021-08-16T01:36:26 | 2021-08-16T01:36:26 | 395,272,413 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter as tk
from tkinter import ttk
import tkinter.messagebox as messagebox
import pickle
from PIL import Image,ImageTk
from tkinter import Scrollbar, Frame
from tkinter.ttk import Treeview
# import BotSpeak
# list_name = ["Adela Chen","Bonnie Lin","Allan Lin","M10907716","M10907306","M10907324"]
# list_visit = ["Junyi Wu","Baron Syu","Frank Zhou","M10907324","M10907324","M10907324"]
# list_time = ["2021/08/03/15","2021/08/04/15","2021/08/05/15","2021/08/06/15","2021/08/07/15","2021/08/08/15"]
import fr_livent
import openpyxl
########################################################################
class MyApp(object):
""""""
#----------------------------------------------------------------------
def __init__(self):
self.list_name=[]
self.list_visit=[]
self.list_time=[]
"""Constructor"""
self.win = tk.Tk()
# self.win.attributes("-fullscreen", True)
self.win.geometry("1024x768")
self.win.title("雲端註冊與深度辨識真面目系統")#定義標題名稱
left=(self.win.winfo_screenwidth()-1024)//2#指定視窗置中
top=(self.win.winfo_screenheight()-768)//2
self.win.geometry("{:}x{:}+{:}+{:}".format(1024,768,left,top))
print(self.win.winfo_screenwidth(),self.win.winfo_screenheight())
        # load the button/background images
self.photo_background=tk.PhotoImage(file=r"./image/main_background.png")
self.photo_inSystem=tk.PhotoImage(file=r"./image/in_system.png")
self.photo_inSearch=tk.PhotoImage(file=r"./image/in_search.png")
        canvas_width = 1024  # create a new canvas
canvas_height =768
canvas = tk.Canvas(self.win,
width=canvas_width,
height=canvas_height)
canvas.pack()
        # background
        canvas.create_image(512, 384, image=self.photo_background)  # paste the background onto the canvas
def inSystem():
# self.hide()
fr_livent.main(self.win.winfo_screenwidth(),self.win.winfo_screenheight())
# self.show()
        # button: open the usage instructions / recognition system
but_System=tk.Button(self.win,image=self.photo_inSystem, command=inSystem)
but_System.place(x=50,y=600)
        # go to the login screen
but_Search=tk.Button(self.win,image=self.photo_inSearch, command=self.openFrame)
but_Search.place(x=600,y=600)
# BotSpeak.speak("歡迎來到KEEPING個人資料管理系統 請點選下方按鍵登入")
self.win.mainloop()
# BotSpeak.speak("掰掰")
#----------------------------------------------------------------------
def hide(self):
""""""
self.win.withdraw()
def closeWindow(self,myclosewindow):
self.onCloseOtherFrame(myclosewindow)
def getinfo(self):
fn = 'EE3407301.xlsx'
wb = openpyxl.load_workbook(fn)
wb.active = 0
ws = wb.active
print(ws.max_row)
print(ws.max_column)
# print(wb)
for i in range(int(ws.max_row-1)):
read_Visitor_name='A'+str(i+2)
read_find_who='D'+str(i+2)
read_time='E'+str(i+2)
read_place='F'+str(i+2)
self.list_name.append(ws[read_Visitor_name].value)
self.list_visit.append(ws[read_find_who].value)
self.list_time.append(ws[read_time].value)
# print("list_name = {:}".format(self.list_name))
# print("list_visit = {:}".format(self.list_visit))
# print("list_time = {:}".format(self.list_time))
# #----------------------------------------------------------------------
def openFrame(self):
""""""
self.hide()
self.win_Search = tk.Toplevel()
# self.win_Search.attributes("-fullscreen", True)
        # handler for the user closing the window (first arg: the window to
        # delete, second: the handler function, i.e. the procedure to run)
self.win_Search.protocol('WM_DELETE_WINDOW',lambda:self.closeWindow(self.win_Search))
#win_Search.attributes("-fullscreen", True)
left=(self.win_Search.winfo_screenwidth()-1024)//2
top=(self.win_Search.winfo_screenheight()-768)//2
self.win_Search.geometry("{:}x{:}+{:}+{:}".format(1024,768,left,top))
self.win_Search.title("行事曆")
self.win_Search.photo_background=tk.PhotoImage(file=r"./image/Search_background.png")
self.win_Search.photo_back=tk.PhotoImage(file=r"./image/back.PNG")
canvas_width = 1024
canvas_height =768
canvas = tk.Canvas(self.win_Search,
width=canvas_width,
height=canvas_height)
canvas.pack()
        # background
canvas.create_image(512,384, image=self.win_Search.photo_background)
btn01= tk.Button(self.win_Search,image=self.win_Search.photo_back,command=lambda: self.onCloseOtherFrame(self.win_Search) )
btn01.place(x=800,y=670)
        # use a Treeview widget to implement the table
frame = Frame(self.win_Search)
frame.place(x=50, y=50, width=800, height=600)
style_head = ttk.Style()
style_head.configure("Treeview.Heading", font=('Noto Sans Mono CJK TC Bold', 25), rowheight=200)
style_head.configure("Treeview", font=('Noto Sans Mono CJK TC Bold', 25), rowheight=100)
        # scrollbar
scrollBar = tk.Scrollbar(frame)
scrollBar.pack(side=tk.RIGHT, fill=tk.Y)
        # Treeview widget: 3 columns, headings shown, with a vertical scrollbar
tree = Treeview(frame,
columns=( 'c1' , 'c2' , 'c3' ),
show= "headings" ,
yscrollcommand=scrollBar.set)
        # set each column's width and alignment
tree.column( 'c1' , width=230, anchor= 'center' )
tree.column( 'c2' , width=230, anchor= 'center' )
tree.column( 'c3' , width=340, anchor= 'center' )
        # set each column's heading text
tree.heading( 'c1' , text= '訪客姓名' )
tree.heading( 'c2' , text= '受訪者姓名' )
tree.heading( 'c3' , text= '來訪時間' )
tree.pack(side=tk.LEFT, fill=tk.Y)
        # attach the vertical scrollbar to the Treeview
scrollBar.config(command=tree.yview)
        # define and bind the Treeview's mouse-click handler
def treeviewClick(event):
pass
tree.bind( '<Button-1>' , treeviewClick)
# print(len(list_time))
self.getinfo()
for i in range(len(self.list_name)):
tree.insert("",i,values=(self.list_name[i],self.list_visit[i],self.list_time[i])) #插入數據
# tree.insert("",1,values=("Adela Chen","Junyi Wu","2021/08/03/15")) #插入數據
# tree.insert("",2,values=("Bonnie Lin","Baron Syu","2021/08/04/15")) #插入數據
# tree.insert("",3,values=("Allan Lin","Frank Zhou","2021/08/05/15")) #插入數據
#----------------------------------------------------------------------
def onCloseOtherFrame(self, otherFrame):
""""""
otherFrame.destroy()
self.show()
#----------------------------------------------------------------------
def CloseWin(self, otherFrame):
""""""
otherFrame.destroy()
#----------------------------------------------------------------------
def show(self):
""""""
self.win.update()
self.win.deiconify()
#----------------------------------------------------------------------
if __name__ == "__main__":
app = MyApp() | UTF-8 | Python | false | false | 7,508 | py | 5 | GUIdemo.py | 3 | 0.531427 | 0.491789 | 0 | 201 | 34.144279 | 131 |
IDJack/leetcode | 936,302,906,243 | 33a27c9597e8d7ea12e2af6e9eadd87e37f9aef8 | 0d15fb1a34f7ff90811f679e4870e826d1da0ee4 | /python3/905_sort_array_by_parity.py | a03cc18843ff0771fcb14c2729d706593d104d17 | [] | no_license | https://github.com/IDJack/leetcode | e405f7acbb6448ff4a77d4ce3acb9ed097068837 | b17d972028e8b8f323ee7d9f2347f88e0f548ea9 | refs/heads/master | 2020-03-31T17:04:23.444593 | 2018-10-13T08:28:31 | 2018-10-13T08:28:31 | 149,770,688 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def sortArrayByParity(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
        # two pointers
start,end = 0,len(A)-1
while start < end:
while start <= len(A)-1 and A[start] % 2 == 0:
start += 1
while end >= 0 and A[end] % 2 == 1:
end -= 1
            # boundary conditions
if start > len(A)-1 or end < 0 or start > end:
return A
A[start],A[end] = A[end],A[start]
start += 1
end -= 1
return A
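
if __name__ == "__main__":
    # Illustrative check with a hypothetical input: evens end up before odds,
    # though the relative order within each group may vary (e.g. [4, 2, 1, 3])
    print(Solution().sortArrayByParity([3, 1, 2, 4]))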
| UTF-8 | Python | false | false | 639 | py | 21 | 905_sort_array_by_parity.py | 20 | 0.3552 | 0.3328 | 0 | 23 | 25.826087 | 58 |
audiolion/py-fitness | 16,552,803,989,744 | b596e6d89cda17940323f4de1d0a8d07ddf54aa2 | 7d60588702280c7f8c6477e7703528bb860aaa4a | /py_fitness/py_fitness/workout/migrations/0013_remove_workout_editor.py | 05d89b76f95431f5ea6672a2a0a7643a193db50a | [
"MIT"
] | permissive | https://github.com/audiolion/py-fitness | 7e420c4ef6e10ff122fc4f30309cd428d4d5bc38 | 9e0ca785c73a07cb788685bbde6e840a7a2e3419 | refs/heads/master | 2021-05-01T10:16:22.214094 | 2017-01-02T02:03:51 | 2017-01-02T02:03:51 | 68,249,682 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-29 03:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('workout', '0012_auto_20161025_2102'),
]
operations = [
migrations.RemoveField(
model_name='workout',
name='editor',
),
]
| UTF-8 | Python | false | false | 394 | py | 52 | 0013_remove_workout_editor.py | 32 | 0.591371 | 0.507614 | 0 | 19 | 19.736842 | 48 |
mattgiltaji/miscutils | 10,849,087,416,738 | 093610ba6f2cba0e977f6c0183b920828188e160 | 76192cb0921ef817df5ea9fcc438b127416e607d | /tests/test_filtermanuel.py | 56e5660c4746d72a9d3bb3ba73ed029ec9ffe7e4 | [] | no_license | https://github.com/mattgiltaji/miscutils | 6138dade31bec633d7e4a93c0b5b2039647362f3 | a116edde88efdd30c6f6c8142d0422150c7744cf | refs/heads/main | 2022-08-29T22:49:20.069735 | 2020-02-24T04:15:33 | 2020-02-24T04:15:33 | 86,417,700 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Tests for filtermanuel.py
# run from miscutils dir as:
# python -m pytest tests/test_filtermanuel.py
from os import path
import pytest
import filtermanuel.filtermanuel as fm
# paths to test files and such
current_dir = path.dirname(path.abspath(__file__))
test_data_dir = path.join(current_dir, 'files', 'filtermanuel')
basic_test_dir = path.join(test_data_dir, 'basic_files')
small_file = path.join(basic_test_dir, 'small.txt')
blank_file = path.join(basic_test_dir, 'blank.txt')
big_file = path.join(basic_test_dir, 'big.txt')
not_exist_file = path.join(basic_test_dir, 'not_exist.txt')
no_matches_dir = path.join(test_data_dir, 'copy_nothing')
some_matches_dir = path.join(test_data_dir, 'copy_some')
all_matches_dir = path.join(test_data_dir, 'copy_everything')
remove_all_dir = path.join(test_data_dir, 'remove_all')
remove_some_dir = path.join(test_data_dir, 'remove_some')
remove_none_dir = path.join(test_data_dir, 'remove_none')
full_test_dir = path.join(test_data_dir, 'full_test')
# hardcoding fun
fake_monster_file_contents = ["Monster", "Monster'1", "Monster 2", "Monster.37",
"monster-dash", "comma, the monster"]
class TestShouldCopy:
@pytest.mark.parametrize("test_input", [
"=================================", # real line
"=====================================", # longer
"==========", # arbitrarily short but valid
"====", # shortest possible valid
])
def test_copy_separator(self, test_input):
assert fm.should_copy(test_input)
@pytest.mark.parametrize("test_input", [
"=", "==", "===", # all too short
"---------------------------", # wrong symbol
"-=-=-=-=-=-=-=-=-=-=", # no mixing
"----=======-----", # nope
"============================-", # no dash
])
def test_dont_copy_bad_separator(self, test_input):
assert not fm.should_copy(test_input)
@pytest.mark.parametrize("test_input", [
"[Ye Olde Medievale Villagee]", # real area
# real areas (with fun characters)
"[An Incredibly Strange Place (Mediocre Trip)]",
"[Anger Man's Level]",
"[The Gourd!]",
"[LavaCo™ Lamp Factory]",
"[A Deserted Stretch of I-911]",
"[Engineering]", # no space in area name
])
def test_copy_area_name(self, test_input):
assert fm.should_copy(test_input)
@pytest.mark.parametrize("test_input", [
"[]", "[ ]", # no blanks alone
# fake areas to test regex bounds
"[!]", "[*****]",
])
def test_dont_copy_bad_area_name(self, test_input):
assert not fm.should_copy(test_input)
@pytest.mark.parametrize("test_input", [
"Monster", "Monster 2", "Monster'1", "comma, the monster",
"monster-dash", "Monster.37",
])
def test_copy_matching_monster(self, test_input):
assert fm.should_copy(test_input, fake_monster_file_contents)
@pytest.mark.parametrize("test_input", [
"monster", "yolo", "comma, ", "-dash", "37", "Monst",
])
def test_dont_copy_nonmatching_monster(self, test_input):
assert not fm.should_copy(test_input, fake_monster_file_contents)
@pytest.mark.parametrize("test_input", [
"Monster {1}", "Monster 2 {3}", "Monster'1 {2}",
"comma, the monster {3}", "monster-dash {2}", "Monster.37 {1}",
])
def test_copy_matching_monster_with_brackets(self, test_input):
assert fm.should_copy(test_input, fake_monster_file_contents)
class TestGetFileContents:
def test_get_blank_file_contents(self):
results = fm.get_file_contents(blank_file)
assert results == []
def test_error_on_bad_filename(self):
with pytest.raises(FileNotFoundError) as excinfo:
fm.get_file_contents(not_exist_file)
assert 'No such file or directory' in excinfo.value.strerror
def test_get_big_file_contents(self):
results = fm.get_file_contents(big_file)
assert len(results) == 5001
for x in range(0, 5000):
assert "this is a much longer line{0:04d}\n".format(x) in results
def test_get_small_file_contents(self):
results = fm.get_file_contents(small_file)
assert len(results) == 11
for x in range(0, 10):
assert "line{0:02d}\n".format(x) in results
assert "line11\n" not in results
class TestRemoveBlankAreas:
@pytest.mark.parametrize("test_dir", [
remove_all_dir, remove_some_dir, remove_none_dir,
])
    def test_remove_blank_areas(self, test_dir):
contents_file = path.join(test_dir, 'contents.txt')
expected_file = path.join(test_dir, 'expected.txt')
with open(contents_file, 'r') as cf:
contents = cf.readlines()
actual = fm.remove_blank_areas(contents=contents)
with open(expected_file, 'r') as ef:
expected = ef.readlines()
assert expected == actual
class TestFilterManuel:
@pytest.mark.parametrize("test_dir", [
no_matches_dir, some_matches_dir, all_matches_dir,
])
def test_filtering(self, tmpdir, test_dir):
actual_file = str(tmpdir.join('filtered_manuel.txt'))
manuel_file = path.join(test_dir, 'manuel.txt')
faxbot_file = path.join(test_dir, 'faxbot.txt')
expected_file = path.join(test_dir, 'expected.txt')
fm.filter_manuel(manuel_path=manuel_file, faxbot_path=faxbot_file,
output_path=actual_file)
with open(expected_file, 'r') as ef:
expected = ef.readlines()
with open(actual_file, 'r') as af:
actual = af.readlines()
assert expected == actual
class TestParseArgs:
@pytest.mark.parametrize("test_dir", [
no_matches_dir, some_matches_dir, all_matches_dir,
])
def test_arg_parsing(self, tmpdir, test_dir):
output_file = str(tmpdir.join('filtered_manuel.txt'))
manuel_file = path.join(test_dir, 'manuel.txt')
faxbot_file = path.join(test_dir, 'faxbot.txt')
arg_string = manuel_file + " " + faxbot_file + " " + output_file
results = fm.parse_args(arg_string.split())
assert manuel_file == results.manuel
assert faxbot_file == results.faxbot
assert output_file == results.output
def test_parse_args_mandatory_fields(self, capsys):
with pytest.raises(SystemExit) as excinfo:
fm.parse_args([])
out, err = capsys.readouterr()
assert 2 == excinfo.value.code
assert 'manuel' in err
assert 'faxbot' in err
assert 'output' in err
class TestMain:
@pytest.mark.parametrize("test_dir", [
no_matches_dir, some_matches_dir, all_matches_dir, full_test_dir,
])
def test_main(self, tmpdir, test_dir, mocker):
output_file = str(tmpdir.join('filtered_manuel.txt'))
manuel_file = path.join(test_dir, 'manuel.txt')
faxbot_file = path.join(test_dir, 'faxbot.txt')
expected_file = path.join(test_dir, 'expected.txt')
arg_string = "filtermanuel.py {mf} {ff} {of}".format(
mf=manuel_file, ff=faxbot_file, of=output_file)
mocker.patch('sys.argv', arg_string.split())
fm.main()
with open(expected_file, 'r') as ef:
expected = ef.readlines()
with open(output_file, 'r') as of:
actual = of.readlines()
assert expected == actual
@pytest.mark.real
def test_real(self, mocker):
real_dir = path.abspath(r'D:\Matt\Desktop\kolmafia\samples')
output_file = path.join(real_dir, 'filtered_faxbot.txt')
manuel_file = path.join(real_dir, 'monster manuel.txt')
faxbot_file = path.join(real_dir, 'faxbot.txt')
args = ["filtermanuel.py", manuel_file, faxbot_file, output_file]
mocker.patch('sys.argv', args)
fm.main()
| UTF-8 | Python | false | false | 8,105 | py | 24 | test_filtermanuel.py | 8 | 0.584105 | 0.578428 | 0 | 214 | 35.864486 | 80 |
nuaays/apiserver | 19,009,525,258,254 | 2ce86ff550e67d52c2597c07f5143c4e8955dd4a | 6db9ad22b62c137b7401f4b510bf0042cc42291b | /api/member_card/a_member_card.py | abec158c5fbb686aa1172da0bcfef2f827671efe | [] | no_license | https://github.com/nuaays/apiserver | 413a9e1d0b1a76878ae2d449ab24cc439896b977 | 15621db1a64ffe199619924b75a5b5c5e6416bed | refs/heads/master | 2021-01-20T08:07:45.930902 | 2017-03-30T08:40:46 | 2017-03-30T08:40:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Personal center - VIP membership
"""
from eaglet.core import api_resource
from eaglet.decorator import param_required
from business.member_card.member_card import MemberCard
class AMemberCard(api_resource.ApiResource):
"""
    Personal center - VIP membership
"""
app = 'member_card'
resource = 'member_card'
@param_required([])
def get(args):
"""
        Entered via the "Personal center - VIP membership" entry point. Normally only
        members who have bound a phone number and activated a membership will reach
        this page, but to guard against non-members opening it directly through a
        shared link or by other means, is_binded and is_vip are checked again here:
        if is_binded is False the frontend should redirect to the phone-binding page;
        if is_vip is False the frontend should redirect to the membership-card list page.
        @param none
@return member_card dict
"""
webapp_user = args['webapp_user']
is_binded = webapp_user.is_binded
member_id = webapp_user.member.id
member_card = MemberCard.from_member_id({
"member_id": member_id,
"fill_options": {
"with_price": True
}
})
if is_binded and member_card:
data = {
'card_number': member_card.card_number,
'is_active': member_card.is_active,
'remained_backcash_times': member_card.remained_backcash_times,
'balance': member_card.balance,
'card_name': member_card.card_name,
'is_binded': is_binded,
'is_vip': True,
'user_icon': webapp_user.user_icon,
'username_for_html': webapp_user.username_for_html,
'valid_time_to': member_card.valid_time_to,
'interval_days': member_card.interval_days,
'next_clear_time': member_card.next_clear_time,
'bill_info': member_card.get_bill_info()
}
else:
data = {
'is_binded': is_binded,
'is_vip': False
}
return data
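
# Illustrative shape of the payload returned above for a bound VIP member
# (added for clarity; the values are made up, the keys mirror the dict built in get):
# {"is_binded": True, "is_vip": True, "card_number": "...", "balance": 0,
#  "card_name": "...", "valid_time_to": "...", "bill_info": [...], ...}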
| UTF-8 | Python | false | false | 1,812 | py | 370 | a_member_card.py | 274 | 0.675 | 0.674342 | 0 | 60 | 24.333333 | 67 |
brightforme/python-bright | 6,777,458,416,620 | 121aea16487b7e8434fd4e85888cb9044ca22ec8 | a52d446dfdef8f1de7317dca64f1f84bc2d5c783 | /tests/test_collections.py | 904423018a0c2a81c2f90c1edafec4d3ba7ebafa | [
"MIT"
] | permissive | https://github.com/brightforme/python-bright | 97fb2bc85192ffcd35e6b991ffdeb0cd157232d1 | 8366ddf411e237e2a440a578df3d1b0c99cd67ca | refs/heads/master | 2020-02-26T16:01:31.772349 | 2016-10-03T08:25:01 | 2016-10-03T08:25:01 | 38,366,308 | 0 | 0 | null | false | 2015-08-25T13:45:43 | 2015-07-01T10:57:56 | 2015-08-05T11:50:14 | 2015-08-25T13:45:43 | 191 | 0 | 0 | 0 | Python | null | null | import unittest
import bright
from tests import settings
from bright.helpers import Forbidden, ResourceNotFound
class CollectionTests(unittest.TestCase):
@classmethod
    def setUpClass(self):  # unittest only runs this hook if it is named exactly setUpClass
scopes = ["collections:read", "collections:write", "collections:like",
"artworks:read", "user:read"]
self.bright_api = bright.Bright(client_id=settings.client_id,
client_secret=settings.client_secret,
scopes=scopes,
**settings.kwargs
)
self.own_collections = self.bright_api.my_collections()["collections"]
self.own_artworks = self.bright_api.my_artworks()["artworks"]
def test_get_collection(self):
"Test we can get a collection"
contents = ['slug', 'thumbnail_url', 'is_private', 'name', 'artworks',
'id', 'curator', 'description', 'draft']
res = self.bright_api.get_collection(self.own_collections[0]["id"])
self.assertIn("collection", res)
for element in contents:
self.assertIn(element, res["collection"])
def test_get_all_collections(self):
"Test we can get all collections"
contents = ['slug', 'thumbnail_url', 'is_private', 'name', 'artworks',
'id', 'curator', 'description', 'draft']
res = self.bright_api.get_all_collections()
self.assertIn("pages", res)
self.assertIn("collections", res)
for collection in res["collections"]:
for element in contents:
self.assertIn(element, collection)
def test_create_collection(self):
"Test we can create a collection"
contents = ['slug', 'thumbnail_url', 'is_private', 'name', 'artworks',
'id', 'curator', 'description', 'draft']
res = self.bright_api.create_collection("test", "'tis but a test", False)
for element in contents:
self.assertIn(element, res["collection"])
self.bright_api.delete_collection(res["collection"]["id"])
def test_delete_collection(self):
"Test we can delete a collection"
res = self.bright_api.create_collection("test", "'tis but a test", False)["collection"]
me = self.bright_api.my_collections()["collections"]
self.assertIn(res, me)
self.bright_api.delete_collection(res["id"])
me = self.bright_api.my_collections()["collections"]
self.assertNotIn(res, me)
def test_update_collection(self):
"Test we can update a collection"
data = {
"name": "foobar"
}
orig = self.bright_api.get_collection(self.own_collections[0]["id"])["collection"]
res = self.bright_api.update_collection(orig["id"], data=data)["collection"]
self.assertEquals(data["name"], res["name"])
self.bright_api.update_collection(orig["id"], {"name": orig["name"]})
def test_add_to_collection(self):
"Test we can add artworks to collections"
collec = self.own_collections[0]
artworks_not_in_collec = [x for x in self.own_artworks
                                  if x not in collec["artworks"]]
artwork_id = artworks_not_in_collec[0]["id"]
res = self.bright_api.add_to_collection(collec["id"], artwork_id)
self.assertEquals({}, res)
res = self.bright_api.get_collection(collec["id"])
self.assertIn(artwork_id, res["collection"]["artworks"])
self.bright_api.remove_from_collection(collec["id"], artwork_id)
    def test_remove_from_collection(self):
"Test we can remove artworks from collections"
collec = self.own_collections[0]
artworks_in_collec = [x for x in self.own_artworks
if x in collec["artworks"]]
artwork_id = artworks_in_collec[0]["id"]
res = self.bright_api.remove_from_collection(collec["id"], artwork_id)
self.assertEquals({}, res)
res = self.bright_api.get_collection(collec["id"])
self.assertNotIn(artwork_id, res["collection"]["artworks"])
self.bright_api.add_to_collection(collec["id"], artwork_id)
def test_like_collection(self):
"Test that we can like a collection"
own_id = self.bright_api.me()["user"]["id"]
all_collec = self.bright_api.get_all_collections()["collections"]
        collec = list(filter(lambda c: own_id not in c["likes"], all_collec))[0]
res = self.bright_api.like_collection(collec["id"])
self.assertEquals({}, res)
res = self.bright_api.get_collection(collec["id"])
self.assertIn(own_id, res["collections"]["likes"])
self.bright_api.unlike_collection(collec["id"])
def test_unlike_collection(self):
"Test that we can unlike a collection"
own_id = self.bright_api.me()["user"]["id"]
all_collec = self.bright_api.get_all_collections()["collections"]
        collec = list(filter(lambda c: own_id not in c["likes"], all_collec))[0]
_ = self.bright_api.like_collection(collec["id"])
res = self.bright_api.unlike_collection(collec["id"])
self.assertEquals({}, res)
res = self.bright_api.get_collection(collec["id"])
self.assertNotIn(own_id, res["collections"]["likes"])
self.bright_api.like_collection(collec["id"])
| UTF-8 | Python | false | false | 5,449 | py | 12 | test_collections.py | 9 | 0.594605 | 0.593136 | 0 | 139 | 38.201439 | 95 |
fajriansyah1127/cash_io | 14,224,931,697,925 | e74ddfda77120bb26d5e1669ddd51e3d6b99b8df | 7bde0b3ea9d47b5d53eb3687f2ca67378ad19a26 | /test.py | 4043f73557f39544d198bc34518c1fe6aecf99db | [] | no_license | https://github.com/fajriansyah1127/cash_io | f72da3743c58e2b5b642c303facaca8172f58804 | 9c5d3676ebfeb1cbdf9095bc760fae9f513a10f2 | refs/heads/main | 2023-06-03T07:19:11.262844 | 2021-06-17T00:49:49 | 2021-06-17T00:49:49 | 364,286,703 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, request,render_template,jsonify, make_response
from werkzeug.security import generate_password_hash, check_password_hash
from flask_restful import Resource, Api
from flask_cors import CORS
from mysql.connector import MySQLConnection
# import mysql.connector
import json
import uuid
import jwt
import datetime
app = Flask(__name__)
def DB():
filejson=open('config.json','r') # Get Data from config.json
config = json.loads(filejson.read()) #Parse the data
db = MySQLConnection(host=config['host'],user=config['user'],password=config['password'],database=config['database'])
return db
def queryToDb(db, query, value): # for CREATE, UPDATE and DELETE queries
try:
cursor = db.cursor()
cursor.execute(query, value)
db.commit()
effectedRow = cursor.rowcount
cursor.close()
db.close()
    except Exception as e:
        print(e)  # report zero affected rows on error so the "< 1" checks in callers treat it as a failure
        return 0
else:
return int(effectedRow)
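
# A minimal usage sketch of queryToDb (added for illustration; the table and
# column below are hypothetical). It returns the number of affected rows:
# affected = queryToDb(DB(), "INSERT INTO demo(name) VALUES (%s)", ("x",))
# print("Fail" if affected < 1 else "Success")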
@app.route("/",methods=['GET'])
def index():
return ''
################ PENJUALAN ########################################
@app.route("/penjualan",methods=['GET','POST'])
def penjualan():
if request.method == 'GET': # GET ALL Penjualan
db=DB()
cursor = db.cursor()
cursor.execute("SELECT * FROM penjualan")
penjualan = cursor.fetchall()
cursor.close()
db.close()
penjualanTodict = lambda r : dict(id=r[0],daftar_barang=r[1],total_harga=r[2],dibayar=r[3]
,kembalian=r[4])
return json.dumps(list(map(penjualanTodict,[r for r in penjualan])))
elif request.method == 'POST': # CREATE penjualan
data = request.get_json()
query = """INSERT INTO penjualan(daftar_barang,total_harga,dibayar,kembalian ) VALUES (%s,%s,%s,%s)"""
value = (data["daftar_barang"],data["total_harga"],data["dibayar"],data["kembalian"])
return jsonify({'message' : "Fail" if queryToDb(DB(),query,value) <1 else "Succes"})
################ PENJUALAN ########################################
@app.route("/penjualan/<id>",methods=['PUT','GET','DELETE'])
def penjualanCrud(id):
db = DB()
cur = db.cursor()
cur.execute(f"SELECT * FROM penjualan WHERE id='{id}'")
penjualan = cur.fetchone()
cur.close()
db.close()
penjualanTodict = lambda r : dict(id=r[0],daftar_barang=r[1],total_harga=r[2],dibayar=r[3]
,kembalian=r[4])
    if penjualan is None:
return jsonify({"message" : "fail,penjualan Not Found"})
if request.method == "PUT":
newpenjualan = request.get_json()
db=DB()
cur=db.cursor()
query = f"""UPDATE penjualan
SET daftar_barang='{newpenjualan['daftar_barang']}',total_harga='{newpenjualan['total_harga']}',
dibayar='{newpenjualan['dibayar']}',
kembalian='{newpenjualan['kembalian']}'
WHERE penjualan.id='{id}'"""
cur.execute(query)
db.commit()
c=cur.rowcount
cur.close()
db.close()
return jsonify({'message' : 'Gagal update' if c<1 else 'Berhasil update'})
elif request.method == 'GET': # GET ONE transaksi
return jsonify({"message" : "success", "result" : penjualanTodict(penjualan)})
elif request.method == 'DELETE': # DELETE penjualan
# return ''
db=DB()
cur=db.cursor()
query = f"DELETE FROM penjualan WHERE id='{id}'"
cur.execute(query)
db.commit()
c=cur.rowcount
cur.close()
db.close()
return jsonify({"msg" : "Fail" if c<1 else "Success"})
################ PENJUALAN ########################################
################ TRANSAKSI ########################################
@app.route("/transaksi",methods=['GET','POST'])
def transaksi():
if request.method == 'GET': # GET ALL Penjualan
db=DB()
cursor = db.cursor()
cursor.execute("SELECT * FROM transaksi")
transaksi = cursor.fetchall()
cursor.close()
db.close()
transaksiTodict = lambda r : dict(id=r[0],tanggal_transaksi=r[1],keterangan=r[2],jenis_transaksi=r[3]
)
return json.dumps(list(map(transaksiTodict,[r for r in transaksi])))
elif request.method == 'POST': # CREATE penjualan
data = request.get_json()
query = """INSERT INTO transaksi(tanggal_transaksi,keterangan,jenis_transaksi) VALUES (%s,%s,%s)"""
value = (data["tanggal_transaksi"],data["keterangan"],data["jenis_transaksi"])
return jsonify({'message' : "Fail" if queryToDb(DB(),query,value) <1 else "Succes"})
@app.route("/transaksi/<id>",methods=['PUT','GET','DELETE'])
def transaksiCrud(id):
db = DB()
cur = db.cursor()
cur.execute(f"SELECT * FROM transaksi WHERE id='{id}'")
transaksi = cur.fetchone()
cur.close()
db.close()
transaksiTodict = lambda r : dict(id=r[0],tanggal_transaksi=r[1],keterangan=r[2],jenis_transaksi=r[3])
    if transaksi is None:
return jsonify({"message" : "fail,transaksi Not Found"})
elif request.method == 'GET': # GET ONE transaksi
return jsonify({"message" : "success", "result" : transaksiTodict(transaksi)})
elif request.method == "PUT":
newtransaksi = request.get_json()
db=DB()
cur=db.cursor()
query = f"""UPDATE transaksi
SET tanggal_transaksi='{newtransaksi['tanggal_transaksi']}',keterangan='{newtransaksi['keterangan']}',
jenis_transaksi='{newtransaksi['jenis_transaksi']}'
WHERE transaksi.id='{id}'"""
cur.execute(query)
db.commit()
c=cur.rowcount
cur.close()
db.close()
return jsonify({'message' : 'Gagal update' if c<1 else 'Berhasil update'})
elif request.method == 'DELETE': # DELETE penjualan
# return ''
db=DB()
cur=db.cursor()
query = f"DELETE FROM transaksi WHERE id='{id}'"
cur.execute(query)
db.commit()
c=cur.rowcount
cur.close()
db.close()
return jsonify({"msg" : "Fail" if c<1 else "Success"})
@app.route("/transaksi/<tanggal_transaksi>",methods=['GET'])
def tanggal_transaksi(tanggal_transaksi):
db = DB()
cur = db.cursor()
cur.execute(f"SELECT * FROM transaksi WHERE tanggal_transaksi='{tanggal_transaksi}'")
transaksi_tanggal = cur.fetchone()
cur.close()
db.close()
transaksi_tanggalTodict = lambda r : dict(id=r[0],tanggal_transaksi=r[1],keterangan=r[2],jenis_transaksi=r[3])
    if transaksi_tanggal is None:
return jsonify({"message" : "fail,transaksi Not Found"})
elif request.method == 'GET': # GET ONE transaksi
return jsonify({"message" : "success", "result" : transaksi_tanggalTodict(transaksi_tanggal)})
################ TRANSAKSI ########################################
################ BARANG_RETAIL ########################################
@app.route("/barang_retail",methods=['GET','POST'])
def barang_retail():
if request.method == 'GET': # GET ALL barang_retail
db=DB()
cursor = db.cursor()
cursor.execute("SELECT * FROM barang_retail")
barang_retail = cursor.fetchall()
cursor.close()
db.close()
barang_retailTodict = lambda r : dict(id=r[0],nama_barang=r[1],harga=r[2],tanggal_kadaluarsa=r[3]
,jumlah_barang=r[4],merk=r[5])
return json.dumps(list(map(barang_retailTodict,[r for r in barang_retail])))
elif request.method == 'POST': # CREATE barang_retail
data = request.get_json()
query = """INSERT INTO barang_retail(nama_barang,harga,tanggal_kadaluarsa,jumlah_barang,merk) VALUES (%s,%s,%s,%s,%s)"""
value = (data["nama_barang"],data["harga"],data["tanggal_kadaluarsa"],data["jumlah_barang"],data["merk"])
return jsonify({'message' : "Fail" if queryToDb(DB(),query,value) <1 else "Succes"})
@app.route("/barang_retail/<id>",methods=['PUT','GET','DELETE'])
def barang_retailCrud(id):
db = DB()
cur = db.cursor()
cur.execute(f"SELECT * FROM barang_retail WHERE id='{id}'")
barang_retail = cur.fetchone()
cur.close()
db.close()
barang_retailTodict =lambda r : dict(id=r[0],nama_barang=r[1],harga=r[2],tanggal_kadaluarsa=r[3],jumlah_barang=r[4],merk=r[5])
    if barang_retail is None:
return jsonify({"message" : "fail,barang_retail Not Found"})
elif request.method == 'GET': # GET ONE barang_retail
return jsonify({"message" : "success", "result" :barang_retailTodict(barang_retail)})
elif request.method == "PUT":
newbarang_retail = request.get_json()
db=DB()
cur=db.cursor()
query = f"""UPDATE barang_retail
SET nama_barang='{newbarang_retail['nama_barang']}',
harga='{newbarang_retail['harga']}',
tanggal_kadaluarsa='{newbarang_retail['tanggal_kadaluarsa']}',
jumlah_barang='{newbarang_retail['jumlah_barang']}',
merk='{newbarang_retail['merk']}'
WHERE barang_retail.id='{id}'"""
cur.execute(query)
db.commit()
c=cur.rowcount
cur.close()
db.close()
return jsonify({'message' : 'Gagal update' if c<1 else 'Berhasil update'})
elif request.method == 'DELETE': # DELETE penjualan
# return ''
db=DB()
cur=db.cursor()
query = f"DELETE FROM barang_retail WHERE id='{id}'"
cur.execute(query)
db.commit()
c=cur.rowcount
cur.close()
db.close()
return jsonify({"msg" : "Fail" if c<1 else "Success"})
################ BARANG_RETAIL ########################################
################ BARANG_NONRETAIL ########################################
@app.route("/barang_nonretail",methods=['GET','POST'])
def barang_nonretail():
if request.method == 'GET': # GET ALL barang_nonretail
db=DB()
cursor = db.cursor()
cursor.execute("SELECT * FROM barang_nonretail")
barang_nonretail = cursor.fetchall()
cursor.close()
db.close()
barang_nonretailTodict = lambda r : dict(id=r[0],nama_barang=r[1],harga=r[2],status=r[3])
return json.dumps(list(map(barang_nonretailTodict,[r for r in barang_nonretail])))
elif request.method == 'POST': # CREATE barang_nonretail
data = request.get_json()
query = """INSERT INTO barang_nonretail(nama_barang,harga,status) VALUES (%s,%s,%s)"""
value = (data["nama_barang"],data["harga"],data["status"])
return jsonify({'message' : "Fail" if queryToDb(DB(),query,value) <1 else "Succes"})
@app.route("/barang_nonretail/<id>",methods=['PUT','GET','DELETE'])
def barang_nonretailCrud(id):
db = DB()
cur = db.cursor()
cur.execute(f"SELECT * FROM barang_nonretail WHERE id='{id}'")
barang_nonretail = cur.fetchone()
cur.close()
db.close()
barang_nonretailTodict =lambda r : dict(id=r[0],nama_barang=r[1],harga=r[2],status=r[3])
    if barang_nonretail is None:
return jsonify({"message" : "fail,barang_nonretail Not Found"})
elif request.method == 'GET': # GET ONE barang_retail
return jsonify({"message" : "success", "result" :barang_nonretailTodict(barang_nonretail)})
elif request.method == 'DELETE': # DELETE penjualan
# return ''
db=DB()
cur=db.cursor()
query = f"DELETE FROM barang_nonretail WHERE id='{id}'"
cur.execute(query)
db.commit()
c=cur.rowcount
cur.close()
db.close()
return jsonify({"msg" : "Fail" if c<1 else "Success"})
elif request.method == "PUT":
newbarang_nonretail = request.get_json()
db=DB()
cur=db.cursor()
query = f"""UPDATE barang_nonretail
SET nama_barang='{newbarang_nonretail['nama_barang']}',
harga='{newbarang_nonretail['harga']}',
status='{newbarang_nonretail['status']}'
WHERE barang_nonretail.id='{id}'"""
cur.execute(query)
db.commit()
c=cur.rowcount
cur.close()
db.close()
return jsonify({'message' : 'Gagal update' if c<1 else 'Berhasil update'})
################ BARANG_NONRETAIL ########################################
if __name__ == "__main__":
app.run(debug=True) | UTF-8 | Python | false | false | 12,794 | py | 2 | test.py | 2 | 0.557371 | 0.55315 | 0 | 317 | 38.365931 | 130 |
OmerTariq-KAIST/dnn-based_indoor_localization | 9,302,899,163,815 | bcfcbedb2d3631e24b2ddd8a416ed2f9b4a41f0f | daf0b7391abf35e6bbd9d9b9759c23c2ce64a201 | /models/simo_hybrid_tut_batch-run.py | 26851f39d0e40551ce6a8cc1a1765692417281f4 | [
"MIT"
] | permissive | https://github.com/OmerTariq-KAIST/dnn-based_indoor_localization | 693ae4fe8a1801c7952e8e583710b49d41231b8d | 39e6a60fbd5095b714f6e158f1b933acc435a982 | refs/heads/master | 2023-03-15T13:28:10.074274 | 2020-12-09T04:00:59 | 2020-12-09T04:00:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, '../models')
sys.path.insert(0, '../utils')
from simo_hybrid_tut import simo_hybrid_tut
from mean_ci import mean_ci
import argparse
import datetime
import numpy as np
from num2words import num2words
# set coordinates loss weight using command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument(
'-C',
'--coordinates_weight',
help='loss weight for a coordinates; default 1.0',
default=1.0,
type=float)
args = parser.parse_args()
coordinates_weight = args.coordinates_weight
# set system parameters
num_runs = 20
# num_runs = 2 # for test
# set default parameters for simo_hybrid_tut()
gpu_id = 0
dataset = 'tut'
frac = 1.0
validation_split = 0.2
preprocessor = 'standard_scaler'
batch_size = 64
epochs = 100
optimizer = 'nadam'
dropout = 0.25
corruption_level = 0.1
dae_hidden_layers = ''
sdae_hidden_layers = [1024, 1024, 1024]
cache = True
common_hidden_layers = [1024]
floor_hidden_layers = [256]
coordinates_hidden_layers = [256]
floor_weight = 1.0
verbose = 0
# inialize results arrays
flr_accs = np.empty(num_runs)
mean_error_2ds = np.empty(num_runs)
median_error_2ds = np.empty(num_runs)
mean_error_3ds = np.empty(num_runs)
median_error_3ds = np.empty(num_runs)
elapsedTimes = np.empty(num_runs)
# run experiments
for i in range(num_runs):
print("\n########## Coordinates loss weight={0:.2f}: {1:s} run ##########".format(coordinates_weight, num2words(i+1, to='ordinal_num')))
rst = simo_hybrid_tut(gpu_id, dataset, frac, validation_split,
preprocessor, batch_size, epochs, optimizer,
dropout, corruption_level, dae_hidden_layers,
sdae_hidden_layers, cache, common_hidden_layers,
floor_hidden_layers, coordinates_hidden_layers,
floor_weight, coordinates_weight, verbose)
flr_accs[i] = rst.flr_acc
mean_error_2ds[i] = rst.mean_error_2d
median_error_2ds[i] = rst.median_error_2d
mean_error_3ds[i] = rst.mean_error_3d
median_error_3ds[i] = rst.median_error_3d
elapsedTimes[i] = rst.elapsedTime
# print out results
base_file_name = '../results/test/simo_hybrid_tut/tut/cw{0:.1f}_'.format(coordinates_weight)
with open(base_file_name + 'floor_accuracy.csv', 'a') as output_file:
output_file.write("{0:.2f},{1:.4f},{2:.4f},{3:.4f},{4:.4f}\n".format(coordinates_weight, *[i*100 for i in mean_ci(flr_accs)], 100*flr_accs.max(), 100*flr_accs.min()))
with open(base_file_name + 'mean_error_2d.csv', 'a') as output_file:
output_file.write("{0:.2f},{1:.4f},{2:.4f},{3:.4f},{4:.4f}\n".format(coordinates_weight, *mean_ci(mean_error_2ds), mean_error_2ds.max(), mean_error_2ds.min()))
with open(base_file_name + 'mean_error_3d.csv', 'a') as output_file:
output_file.write("{0:.2f},{1:.4f},{2:.4f},{3:.4f},{4:.4f}\n".format(coordinates_weight, *mean_ci(mean_error_3ds), mean_error_3ds.max(), mean_error_3ds.min()))
| UTF-8 | Python | false | false | 3,033 | py | 25 | simo_hybrid_tut_batch-run.py | 24 | 0.658424 | 0.619189 | 0 | 83 | 35.542169 | 170 |
NilNoyon/BACS | 11,476,152,661,113 | 408286dd3975c75403ca04ccfecb3a5c83e370f1 | c6c5dbf05be9dac2a2a4533a1efa7bcaf1c23a06 | /BACS/clients/urls.py | 41f59d1aabf510888c5627e231d46434ac79c170 | [] | no_license | https://github.com/NilNoyon/BACS | 9c54e4a440de79a8a39e13c29956583a5476c07f | 6efdb3d18caa5c08ffef3632afd7d622aedee3b0 | refs/heads/master | 2022-12-13T13:25:44.973194 | 2019-08-26T21:02:24 | 2019-08-26T21:02:24 | 195,283,228 | 0 | 0 | null | false | 2022-12-08T05:55:36 | 2019-07-04T17:59:26 | 2019-08-26T21:02:38 | 2022-12-08T05:55:36 | 7,739 | 0 | 0 | 4 | HTML | false | false | from django.urls import path
from clients.views import *
from . import views
# CLIENTS SECTION
urlpatterns = [
path('dashboard/', views.dashboard_client, name='client_dashboard'),
path('update_profile/', views.update_profile, name='update_profile'),
path('my_profile/', views.my_profile, name='my_profile'),
path('view_given_amount/', views.view_given_amount, name='view_given_amount'),
path('given_amount/', views.given_amount, name='given_amount'),
path('view_cost_info/', views.view_cost_info, name='view_cost_info'),
] | UTF-8 | Python | false | false | 547 | py | 35 | urls.py | 17 | 0.700183 | 0.700183 | 0 | 13 | 41.153846 | 82 |
alay3168/XGTestProjects | 15,144,054,688,826 | 16d96f77e59b21103e0b977d04f0909830cb57b2 | 503313e19bfed3f842391f1c2854b7198bb5d09c | /camrea_web_auto_test/TestCase/pytest_demo.py | 326654d01625dccaa00b8a7b314aaf7b763135a2 | [] | no_license | https://github.com/alay3168/XGTestProjects | 264e84aab33f968a704f533577799617175c619b | 01bd4ed3015b28284043cccab54902bd58ce24f8 | refs/heads/master | 2022-11-02T11:11:04.625750 | 2020-10-12T05:04:49 | 2020-10-12T05:04:49 | 250,506,762 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
# content of test_sample.py
def func(x):
return x + 1
def test_answer():
    assert func(3) == 5  # intentionally fails (func(3) == 4) to demonstrate pytest's failure report
if __name__ == "__main__":
    pytest.main()
| UTF-8 | Python | false | false | 178 | py | 317 | pytest_demo.py | 187 | 0.61236 | 0.589888 | 0 | 12 | 13.833333 | 27 |
dneff/advent2019 | 5,927,054,893,244 | 17084620abe8c3c0e42f46cfc78399a00ae5e208 | ae9a3122bf9fdbb9c38de46acfcefb2440bd4706 | /11/solution.py | 43570d10d8fb5144b4f526bb2a622634468164d5 | [] | no_license | https://github.com/dneff/advent2019 | 683fe9426e196b6a40cd28e6854b22a0a2ddb124 | 5b87b3d20b23370bd623b17e19f4cfce605e2eb6 | refs/heads/master | 2020-11-24T00:50:01.552751 | 2020-01-13T01:27:33 | 2020-01-13T01:27:33 | 227,889,697 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from itertools import permutations
from IntCode import IntCode, InputInterrupt, OutputInterrupt
from collections import defaultdict
def newOrientation(orientation, turn):
if turn == 0:
orientation = (orientation - 1) % 4
elif turn == 1:
orientation = (orientation + 1) % 4
return orientation
def newLocation(loc, direction):
move = {
'N': (0, 1),
'E': (1, 0),
'S': (0, -1),
'W': (-1, 0)
}
delta = move[direction]
return loc[0] + delta[0], loc[1] + delta[1]
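
# Quick sanity checks of the two helpers above (added for illustration):
#   newOrientation(0, 1)      -> 1        # facing N, turning right gives E
#   newOrientation(0, 0)      -> 3        # facing N, turning left gives W
#   newLocation((0, 0), 'N')  -> (0, 1)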
def main():
with open('input1.txt', 'r') as file:
program = file.read().strip()
panels = defaultdict(int)
x, y = 0, 0
panels[(x,y)] = 0
direction = ['N', 'E', 'S', 'W']
orientation = 0
turn = False
comp1 = IntCode(program)
comp1.push(panels[(x, y)])
painted = 0
painted_panels = []
while not comp1.complete:
try:
comp1.run()
except(InputInterrupt):
            panel_color = panels[(x, y)]  # renamed from "input" to avoid shadowing the builtin
            comp1.push(panel_color)
except(OutputInterrupt):
out = comp1.pop()
if turn:
orientation = newOrientation(orientation, out)
x, y = newLocation((x, y), direction[orientation])
else:
if out == 1 and panels[(x, y)] == 0 and (x, y) not in painted_panels:
painted += 1
painted_panels.append((x, y))
panels[(x,y)] = out
turn = not turn
print(f"Solution 1: {painted} panels are painted at least once.")
# -=-=-=- Part 2
panels = defaultdict(int)
x, y = 0, 5
panels[(x,y)] = 1
direction = ['N', 'E', 'S', 'W']
orientation = 0
turn = False
comp2 = IntCode(program)
comp2.push(panels[(x, y)])
while not comp2.complete:
try:
comp2.run()
except(InputInterrupt):
            panel_color = panels[(x, y)]
            comp2.push(panel_color)
except(OutputInterrupt):
out = comp2.pop()
if turn:
orientation = newOrientation(orientation, out)
x, y = newLocation((x, y), direction[orientation])
else:
panels[(x,y)] = out
turn = not turn
white_panels = [x for x in panels.keys() if panels[x] == 1]
max_row = max([x[1] for x in white_panels])
max_col = max([x[0] for x in white_panels])
print("Solution 2 (registration identifier):")
for r in range(max_row + 1, -1, -1):
row = []
for c in range(max_col + 1):
if (c,r) in white_panels:
row.append('*')
else:
row.append(' ')
print(f"{''.join(row)}")
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 2,808 | py | 27 | solution.py | 26 | 0.494302 | 0.475427 | 0 | 111 | 24.297297 | 85 |
thoas/django-metadata | 4,252,017,632,976 | d54fd069837e037080f271336ca58e5118455b9a | 132e19731444eb30c6c0cea5a3120030af6a9c2a | /metadata/connection.py | 0ea24135dbb2b9881fab2b80ff3f47ad1aac1ec7 | [] | no_license | https://github.com/thoas/django-metadata | d08e5ddf389acac789488a6cd96e976cbb50e13f | a6e3eeac79e3ed36afa1c652db5a8a3d3473507c | refs/heads/master | 2021-01-15T15:47:21.906343 | 2018-06-15T13:02:14 | 2018-06-15T13:02:14 | 10,442,157 | 21 | 1 | null | false | 2014-10-02T13:53:26 | 2013-06-02T20:58:01 | 2014-10-02T09:32:49 | 2014-10-02T13:53:25 | 136 | 8 | 1 | 0 | Python | null | null | from . import settings
from .utils import get_client
client = get_client(settings.REDIS_CONNECTION,
connection_class=settings.REDIS_CONNECTION_CLASS)
| UTF-8 | Python | false | false | 172 | py | 11 | connection.py | 7 | 0.715116 | 0.715116 | 0 | 6 | 27.666667 | 69 |
alimgee/-mollyrose--in-django | 6,966,436,962,629 | fb8b0affbafc8e601d7fa3f2464a959a0f1b2c60 | 5acf3fba7c2937f4b6f22967cae9d26855afbdfb | /help/views.py | cd7690deb0d19b71c4f05e58345632465f56d30e | [] | no_license | https://github.com/alimgee/-mollyrose--in-django | c67638d56409a1a35af3c648be5de55c9a0d5f53 | 07858c332bb7c18dfd61e22dc2b5cd57baccfd37 | refs/heads/master | 2022-12-14T02:27:23.442925 | 2020-09-01T09:11:37 | 2020-09-01T09:11:37 | 287,684,126 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from .models import Organisations
def help(request):
    ''' render the help page listing support organisations '''
organisations = Organisations.objects.all().order_by('position')
    context = {
'Organisations': organisations
}
    return render(request, 'help/index.html', context)
agussarcee/agussarcee | 13,907,104,121,041 | a6edc55a27cb5aebde9b70434d46ec31e264baf9 | d1cf2b5cf827762bca35c3ebe12b192d9ebec706 | /cat.py | f6250fa489b7da7ee3cd04f60e49cc15b919db66 | [] | no_license | https://github.com/agussarcee/agussarcee | 4e07b5a39673ebb31c199ba281002dd69986f3ad | fdf79eb52c389726a13e4dccf920a0162e312798 | refs/heads/master | 2020-07-20T15:51:40.398474 | 2019-09-13T00:13:58 | 2019-09-13T00:13:58 | 206,672,259 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #muestra los bytes en texto
# 1 - Open the file
# 2 - Read the file
# 3 - Print to the screen
# 4 - Close the file
NOMBRE = "lorem.txt"
archivo = open(NOMBRE,'r')
contenido = archivo.read()
print(contenido)
archivo.close()
| UTF-8 | Python | false | false | 235 | py | 8 | cat.py | 8 | 0.693617 | 0.676596 | 0 | 11 | 20.272727 | 27 |
StepanBarantsev/DeepLearn_Numbers | 1,288,490,231,494 | bc4692df50f326a702223a0268da2c65823a8580 | e4526526d8bfb39199f165ebfbf69ccca5376532 | /main2.py | 2d426448687e21e4fbed5d34c33c875f753cd3b1 | [] | no_license | https://github.com/StepanBarantsev/DeepLearn_Numbers | 4d4dd1b3cbe66470f949d71771ded6d61051d862 | afd9374ad338be36a78e472b64d1d44794898330 | refs/heads/master | 2020-07-29T02:29:47.385976 | 2019-09-24T13:39:27 | 2019-09-24T13:39:27 | 209,632,737 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from skimage.io import imread, imsave
from helper import parse_img, parse_labels, write_weigth_to_file, get_weight
class Neuro2:
    # The number of layers equals the number of weight-file names
    # weights_from_file -- boolean flag: whether to load the weights from files
    # shapes -- tuples holding the dimensions of the weight matrices (a list of tuples)
def __init__(self, filenames, weights_from_file, shapes):
self.filenames = filenames
self.shapes = shapes
self.weights = []
self.get_weights(weights_from_file)
def get_weights(self, from_file):
if from_file:
for i, filename in enumerate(self.filenames):
self.weights.append(get_weight(filename, self.shapes[i][0], self.shapes[i][1]))
else:
for i in range(len(self.filenames)):
self.weights.append(get_weight(None, self.shapes[i][0], self.shapes[i][1]))
def write_weights_to_file(self):
for i in range(len(self.filenames)):
write_weigth_to_file(self.filenames[i], self.weights[i])
def learn(self, iterations):
        # Load the images and the expected labels
array_imgs = parse_img('train-images-idx3-ubyte', 60000)
expected = parse_labels('train-labels-idx1-ubyte')
for something in range(iterations):
for index, img in enumerate(array_imgs):
exp = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
exp[expected[index]] += 1
alpha = 0.0000000001
layers = [np.array(img)]
for i in range(len(self.weights)):
layers.append(np.array(layers[i].dot(self.weights[i])))
layers = np.array(layers)
final_res = layers[len(layers) - 1]
deltas = [(np.array(final_res - exp))]
                # We should end up with 2 deltas in total; delta 0 is not multiplied by a weight, hence the -1
for i in range(len(self.weights) - 1):
deltas.append(np.array(deltas[i].dot(self.weights[len(self.weights) - 1 - i].T)))
deltas = np.array(deltas)
for i in range(len(deltas) // 2):
deltas[i], deltas[len(deltas) - 1 - i] = deltas[len(deltas) - 1 - i], deltas[i]
for i in range(len(self.weights)):
self.weights[i] -= alpha * np.matrix(layers[i]).T.dot(np.matrix(deltas[i]))
            print('Iteration %s' % something)
self.write_weights_to_file()
def predict(self, img):
result = img.dot(self.weights[0])
for i in range(1, len(self.weights)):
result = result.dot(self.weights[i])
m = result[0]
ind = 0
for i, res in enumerate(result):
if res > m:
m = res
ind = i
return ind, m
def predict_by_mnist(self):
array_imgs = parse_img('t10k-images-idx3-ubyte', 10000)
expected = parse_labels('t10k-labels-idx1-ubyte')
error = 0
for i, img in enumerate(array_imgs):
ind, m = self.predict(img)
            print('Predicted digit: %s. Actual: %s' % (ind, expected[i]))
if ind != expected[i]:
error += 1
        print('Number of errors: %s' % error)
        print('Total digits tested: %s' % len(expected))
# o = Neuro2(['w1.txt', 'w2.txt'], weights_from_file=False, shapes=[(28 * 28, 40), (40, 10)])
o = Neuro2(['w1.txt'], weights_from_file=False, shapes=[(28 * 28, 10)])
o.learn(1)
o.predict_by_mnist()
| UTF-8 | Python | false | false | 3,908 | py | 11 | main2.py | 7 | 0.562032 | 0.536497 | 0 | 83 | 42.361446 | 101 |
AK1737/testRepo | 9,491,877,744,278 | e847a342aabad844c6260233639122644857e271 | b28f166e7f81e3d58868f5d3be6e07fe9483a8af | /Tasks/Kotliarevskiy/bot/bot1.py | fe910a2475a9905542b94a6110029521d143a324 | [] | no_license | https://github.com/AK1737/testRepo | f235a15ecaabe63823e100aac9cfceef6ae69580 | 670b6cd516fb9b6c18ef3a7adb538a7ddb893ed9 | refs/heads/master | 2020-06-03T04:17:26.729074 | 2019-08-05T14:40:03 | 2019-08-05T14:40:03 | 191,435,022 | 0 | 0 | null | true | 2019-06-11T19:14:29 | 2019-06-11T19:14:28 | 2019-05-24T21:23:29 | 2019-05-24T21:23:27 | 83 | 0 | 0 | 0 | null | false | false | from vk_api.longpoll import VkLongPoll, VkEventType
import vk_api
tokenn='d0a27ef71602aca5d4ab459fdf5b3b9e969f6c7f3c39a936949774ef086c87d375f087b8b2aecc51a582f'
vk_session = vk_api.VkApi(token=tokenn)
longpoll = VkLongPoll(vk_session)
vk = vk_session.get_api()
while True:
for event in longpoll.listen():
if event.type == VkEventType.MESSAGE_NEW and event.text and not(event.from_me):
print('lett')
vk_session.method('messages.send', {'user_id': event.user_id, 'message': event.text, 'random_id': 0})
| UTF-8 | Python | false | false | 593 | py | 24 | bot1.py | 24 | 0.735245 | 0.647555 | 0 | 15 | 38.466667 | 113 |
SemchenkoSergey/port_status_crontab | 19,112,604,501,630 | 572e4e35ed88c3d2978b9f686ffc3efbf7087bce | 4fba426a605e3d29292c3e3b571767f3c829c728 | /resources/Settings.py | 9f6743f00c5ded49c49171ac983639e924b3ccdc | [] | no_license | https://github.com/SemchenkoSergey/port_status_crontab | 341225b9e90f3fb3d244a00c974456300da89749 | fa59036ac0c2e12d3d3d5c7322599362cab08bf8 | refs/heads/master | 2020-03-25T16:58:00.443976 | 2018-08-13T08:25:55 | 2018-08-13T08:25:55 | 143,956,660 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: utf-8
### Port_Status ###
# DSLAM credentials
login_5600 = ''
password_5600 = ''
login_5616 = ''
password_5616 = ''
# Number of worker threads
threads = 5
# How many days of records to keep
days = 35
# List of DSLAMs
hosts = (('ip', '5600'),
('ip', '5616'))
# Mysql
db_host = 'localhost'
db_user = 'inet'
db_password = 'inet'
db_name = 'inet'
### Session_Count ###
threads_count = 3
# Onyma
onyma_login = ''
onyma_password = ''
| UTF-8 | Python | false | false | 535 | py | 9 | Settings.py | 7 | 0.609071 | 0.546436 | 0 | 30 | 14.433333 | 31 |
KyungHoon0126/Algorithm | 6,390,911,367,099 | 078d34e1b6c73623804e9634ac3ada61a5f1c5f0 | f153a36b5e211690ded1af00c0160eebd2add1ca | /이것 취업을 위한 코딩 테스트다 with 파이썬/Greedy/숫자카드게임.py | d89707d43a666928518aaf9ca256bfb8555589c4 | [] | no_license | https://github.com/KyungHoon0126/Algorithm | 47551bbe22c70eac04ed518c2c9c1f65d48ee5b9 | 8369f0e1103d282cdc138666add65dd0ca926e70 | refs/heads/master | 2021-08-17T08:32:09.970502 | 2021-06-22T12:52:22 | 2021-06-22T12:52:22 | 214,456,043 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 1
# n, m = map(int, input().split())
#
# result = 0
# for i in range(n):
# data = list(map(int, input().split()))
#     # find the smallest number in the current row
# min_value = min(data)
#     # take the largest among those smallest numbers
# result = max(result, min_value)
#
# print(result)
# 2
n, m = map(int, input().split())
result = 0
# read and process one row at a time
for i in range(n):
data = list(map(int, input().split()))
min_value = 10001
for k in data:
min_value = min(min_value, k)
    # take the largest among those smallest numbers
result = max(result, min_value)
print(result)
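
# Worked example (added for illustration): for the input
#   3 3
#   3 1 2
#   4 1 4
#   2 2 2
# the row minimums are 1, 1 and 2, so the program prints 2.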
| UTF-8 | Python | false | false | 659 | py | 124 | 숫자카드게임.py | 107 | 0.540395 | 0.524237 | 0 | 31 | 16.967742 | 44 |
flashlan/Curso-Ciencia-da-Computacao-com-Python-Parte-2-Alternative-Solutions | 14,929,306,324,614 | 392a3a656eabd40f68c5c863f3be73cd11a27010 | aca98ac45978308c69c02fd4b56708ccff9f0921 | /dimensoes_matriz.py | c8f087e7a4eecce5b6ae7b9dea14c03b1683cbc1 | [] | no_license | https://github.com/flashlan/Curso-Ciencia-da-Computacao-com-Python-Parte-2-Alternative-Solutions | eaa08b8c698d623dc3f34df5792b7a4f58b3fc8a | c5d15d6d51d9c03b263bc338c25a623a7ff2f447 | refs/heads/master | 2022-08-01T14:53:02.805633 | 2020-05-18T15:24:25 | 2020-05-18T15:24:25 | 260,817,957 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri May 1 12:40:00 2020
@author: pcstream
"""
def dimensoes(matriz):
    ''' (matriz) --> receives a matrix (a list of lists) as a parameter and
    prints the dimensions of the received matrix in the format iXj (rows x columns)'''
    matriz = str(matriz)
    linhas = matriz.count("[") - 1                    # one "[" per row, plus the outer bracket
    colunas = int((matriz.count(",") + 1) / linhas)   # total commas = rows*cols - 1
    out = print(str(linhas) + 'X' + str(colunas))
return out
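
# Usage sketch (added for illustration):
#   dimensoes([[1, 2], [3, 4], [5, 6]])   # prints 3X2 (3 rows, 2 columns)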
| UTF-8 | Python | false | false | 428 | py | 21 | dimensoes_matriz.py | 20 | 0.58216 | 0.549296 | 0 | 15 | 26.4 | 62 |
onurmatik/LambdaTwitterOAuth | 13,417,477,843,111 | 12fdca52ee430ff2d7397cf19a24b755f5a50f37 | ed84e8fce05f96f088f4c81f4677864fc4c15e4d | /auth.py | 8b110f7fe3fde5a6e0ea1c2201207411e755cd8a | [] | no_license | https://github.com/onurmatik/LambdaTwitterOAuth | 1e28f2434eac34718570c926c63a643a16b07ef3 | b46dc433b53f6e4a93c838fb306e59bc92a400f2 | refs/heads/master | 2021-01-17T17:54:37.283222 | 2016-10-12T15:00:01 | 2016-10-12T15:00:01 | 70,708,232 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import boto3
import json
import twitter
from settings import *
def get_sign_in_token(event, context):
client = twitter.UserClient(
CONSUMER_KEY,
CONSUMER_SECRET,
)
token = client.get_signin_token(
callback_url=CALLBACK_URL
)
return {
'location': token.auth_url,
'cookie': 'token=%s;PATH=/;' % (
token.oauth_token_secret,
),
}
def get_access_token(event, context):
token = event['queryParams']['oauth_token']
secret = event['headers']['Cookie'].split('=')[1]
client = twitter.UserClient(
CONSUMER_KEY,
CONSUMER_SECRET,
token,
secret,
)
token = client.get_access_token(event['queryParams']['oauth_verifier'])
# save the token to dynamodb
dynamodb = boto3.resource(
'dynamodb',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION,
endpoint_url=DYNAMODB_ENDPOINT,
)
table = dynamodb.Table(DYNAMODB_TABLE)
table.put_item(
Item={
'user_id': int(token['user_id']),
'app_name': TWITTER_APP_NAME,
'data': json.dumps(token),
},
#ConditionExpression='attribute_not_exists',
)
return {
'token': token,
}
| UTF-8 | Python | false | false | 1,327 | py | 2 | auth.py | 1 | 0.574228 | 0.571967 | 0 | 54 | 23.574074 | 75 |
JiajieMo/Hierarchical-Image-Matting-Model-for-Blood-Vessel-Segmentation-in-Fundus-images | 17,386,027,620,053 | 19bdc78f15d4a5b8f4d94a6caed1e89544a417bc | 68b59cdc67b66c2aafacd4718a0568c58cf64d74 | /Vessel Skeleton Extraction.py | c38087bf033c8f5dec7530c8cbc186f761bd44dd | [] | no_license | https://github.com/JiajieMo/Hierarchical-Image-Matting-Model-for-Blood-Vessel-Segmentation-in-Fundus-images | d7ee9bc28cf727fd50ac9dfb4f8619649959d4a3 | 627fdfb2103dc6c0f0cca0bf70bfbaf5069cd24f | refs/heads/master | 2022-12-12T06:33:30.331869 | 2020-09-04T03:35:45 | 2020-09-04T03:35:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 15:18:18 2019
@author: Adithya
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
from PIL import Image
from skimage.exposure import rescale_intensity
from scipy.ndimage import correlate,convolve
import natsort
path = os.path.join(os.getcwd(), '')
path_mask = os.path.join(os.getcwd(), 'training', 'mask')
path_results = os.path.join(os.getcwd(), 'Binary Images')
files_avail = glob.glob(os.path.join(path, '*.tif'))
masks = os.listdir(path_mask)
masks = natsort.natsorted(masks)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(21,21))
def convolve2D(image,kernel):
(iH, iW) = image.shape
(kH, kW) = kernel.shape
pad = (kW - 1) // 2
img = cv2.copyMakeBorder(image, pad, pad, pad, pad, cv2.BORDER_REPLICATE)
w = np.zeros((iH,iW), dtype = "float32")
output = np.zeros((iH, iW), dtype = "float32")
for y in np.arange(pad, iH + pad):
for x in np.arange(pad, iW + pad):
roi = img[y - pad:y + pad + 1, x - pad:x + pad + 1]
output[y - pad,x - pad] = (roi * kernel).sum()
w = image - output
output = rescale_intensity(output, in_range = (0,255))
output = (output * 255).astype("uint8")
return output, w
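
# Shape sanity check for convolve2D (added for illustration): it returns the
# rescaled smoothed image and the residual plane w = image - raw convolution,
# both the same size as the input:
#   img = np.zeros((32, 32), dtype="uint8")
#   out, w = convolve2D(img, np.ones((5, 5), dtype=np.float32) / 25.0)
#   assert out.shape == w.shape == img.shape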
for file,m_ad in zip(files_avail, masks):
C_curr = cv2.imread(file,0)
#C_curr = clahe.apply(C_next)
#mask = cv2.imread(os.path.join(path_mask, 'frame0.png'), 0)
#C_next = cv2.cvtColor(C_next, cv2.COLOR_BGR2GRAY)
#C_next = ~C_next
#Defining the filter
C1 = 1./16.
C2 = 4./16.
C3 = 6./16.
W = []
t = True
KSize = [5,9,17]
for scale, KS2 in enumerate(KSize):
KS2 = int(KS2/2)
kernel = np.zeros((1,KSize[scale]), dtype = np.float32)
kernel[0][0] = C1
kernel[0][KSize[scale]-1] = C1
kernel[0][int(KS2/2)] = C2
kernel[0][int(KSize[scale]/4+KS2)] = C2
kernel[0][KS2] = C3
k = kernel.T * kernel
#C_next = cv2.filter2D(C_curr, -1, k)
#C_next = cv2.sepFilter2D(C_curr, cv2.CV_32F, kernelX = kernel, kernelY = kernel)
#C_next = convolve(C_curr, k, mode = 'mirror')
C_next, w = convolve2D(C_curr, k)
C_curr = C_next
if(t):
t = False
continue
W.append(w)
# Combining all the wavelet scales
Iiuw = W[0] + W[1]
mask = cv2.imread(os.path.join(path_mask,m_ad),0)
per_px_inc = 0.22
epsilon = 0.03
t = np.sort(np.ravel(Iiuw))
thres = t[int(per_px_inc * len(t)) - 1] + epsilon
bw = Iiuw < thres
bw = bw.astype(np.uint8) * 255
fil_bw = cv2.bitwise_and(bw,bw, mask = mask)
m = np.ones_like(mask) * 255
m1 = np.ones_like(mask) * 255
_, contours, _ = cv2.findContours(fil_bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
area = cv2.contourArea(cnt)
if(area < 759.71):
if(area < 43.7):
cv2.drawContours(m1,[cnt],-1,0,-1)
else:
(x, y, w, h) = cv2.boundingRect(cnt)
extent = area / float(w * h)
VRatio = w / float(h)
if((VRatio >= 2.2)and(extent < 0.25)):
cv2.drawContours(m1,[cnt],-1,0,-1)
cv2.drawContours(m,[cnt],-1,0,-1)
T3 = cv2.bitwise_and(fil_bw, m, mask = mask)
vse = cv2.bitwise_and(fil_bw, m1, mask = mask)
#Iiuw = Iiuw.astype(np.uint8)
#newfin = cv2.erode(Iiuw, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
#Iiuw = ~Iiuw
cv2.imwrite(os.path.join(path_results, os.path.basename(file)), fil_bw)
cv2.imwrite(os.path.join(os.getcwd(),'T3', os.path.basename(file)), T3)
cv2.imwrite(os.path.join(os.getcwd(),'Final_VSE', os.path.basename(file)), vse)
"""for i in range(Iiuw.shape[0]):
for j in range(Iiuw.shape[1])
t, th2 = cv2.threshold(Iiuw, 3, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
img = Iiuw * mask
img = ((Iiuw > (t + 0.155 * 255)) * 255).astype(np.uint8)
img = ~img
img = cv2.bitwise_and(img,img, mask = mask)
cv2.imshow('T3', T3)
cv2.imshow('T4', T4)
cv2.waitKey(0)
cv2.destroyAllWindows()
mask = np.ones(img.shape[:2], dtype="uint8") * 255"""
for file, m_ad in zip(natsort.natsorted(os.listdir(path_results)), masks):
    # the original calls were missing their arguments; assumed intent: re-read each
    # saved binary map, re-apply the field-of-view mask and overwrite the file
    fil_bw = cv2.imread(os.path.join(path_results, file), 0)
    mask = cv2.imread(os.path.join(path_mask, m_ad), 0)
    fil_bw = cv2.bitwise_and(fil_bw, fil_bw, mask=mask)
    cv2.imwrite(os.path.join(path_results, file), fil_bw)
| UTF-8 | Python | false | false | 4,525 | py | 8 | Vessel Skeleton Extraction.py | 5 | 0.560221 | 0.514254 | 0 | 135 | 31.518519 | 91 |
DLu/askbot_crawler | 2,714,419,381,739 | 2985f7a4107c3f1c9e5fc36d179702616179b4cf | da9f1dca0b796ad0fa828d4d66b5e60b232b1aaa | /html_generation.py | 3c47cbd46b1287487269e413d99b95a74cfda576 | [] | no_license | https://github.com/DLu/askbot_crawler | 7931038ae8faf6b6a416b6ef87021eecfe7fe589 | c41f69d4d8768a0df8e82081453e6bf3494055e6 | refs/heads/master | 2021-05-16T02:24:12.690704 | 2018-06-19T20:51:18 | 2018-06-19T20:51:18 | 28,569,962 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
JQUERY_LINKS = """
<script src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
<script src="http://cdn.datatables.net/1.10.4/js/jquery.dataTables.min.js"></script>
<link href="http://cdn.datatables.net/1.10.4/css/jquery.dataTables.css" rel="stylesheet" type="text/css"/>
"""
INFINITY_SORT = """
<script>
$.fn.dataTable.ext.type.order['infinity-pre'] = function ( d ) {
if(isNaN(d)){
return 10000;
}
return parseInt(d);
};
</script>
"""
def header(title="ROS Answered", extra='', relative='', hide=False):
s = '<head>\n'
s += '<title>%s</title>\n' % title
s += extra
s += '<link href="http://fonts.googleapis.com/css?family=Roboto+Condensed" rel="stylesheet" type="text/css">\n'
s += '<link href="%sanswered.css" rel="stylesheet" type="text/css"/>' % relative
if hide:
s += '<script src="%sHide.js"></script>' % relative
s += '</head>\n'
return s
def generate_table(M, id="rostable", params={}):
if len(M) == 0:
return ''
s = '<table class="display" id="%s">\n' % id
s += '<thead>\n<tr><th>'
s += '<th>'.join(M[0].keys())
s += '\n</thead>\n<tbody>\n'
for m in M:
s += '<tr>'
        for k, v in m.items():  # .items() works on both Python 2 and 3
s += '<td>' + str(v)
s += '\n'
s += '</tbody>\n</table>\n'
s += """
<script>
$(document).ready(function() {
$('#%s').DataTable(%s);
} );
</script>""" % (id, json.dumps(params))
# HACK
    for k, v in params.items():
if 'Callback' in k:
s = s.replace('"%s"' % v, v)
return s
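
# Minimal usage sketch (added for illustration; the column names and rows are
# made up):
# rows = [{'Question': '<a href="#q1">Q1</a>', 'Answers': 2}]
# page = header(title='Demo', extra=JQUERY_LINKS) + generate_table(rows, id='demo')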
| UTF-8 | Python | false | false | 1,589 | py | 11 | html_generation.py | 9 | 0.53241 | 0.520453 | 0 | 58 | 26.396552 | 115 |
qingkediguo/QQMusicAPI | 11,441,792,903,657 | 25de8732cfbb07b8db04f002a852b42a55152cde | b9eba831b971cc3bc6d10c9cbfad759bb382d152 | /QQMusicAPI/song.py | dc41f08d20eea18b07691cbd66ad375c1a862426 | [] | no_license | https://github.com/qingkediguo/QQMusicAPI | 564315f3e9d2842133d5fbee1788a2f89b4018b5 | 1ea6af0415bc3620fdbe735a63fc5b6242e67d85 | refs/heads/master | 2020-04-16T17:46:15.121908 | 2018-12-22T13:09:23 | 2018-12-22T13:09:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
import time
import json
import base64
import re
import math
import requests
from bs4 import BeautifulSoup
class Song(object):
def __init__(self, mid, **kwargs):
self.guid = int(random.random() * 2147483647) * int(time.time() * 1000) % 10000000000
self.headers = {
"cookie": 'pgv_pvi=23333333; pgv_si=23333333; pgv_pvid={}; qqmusic_fromtag=30'.format(self.guid),
}
self.mid = mid
self.media_mid = kwargs.get('media_mid')
self.title = kwargs.get('title')
self.singer = kwargs.get('singer')
self.album = kwargs.get('album')
self.filename = 'C400{}.m4a'.format(self.mid)
self.kwargs = kwargs
self.lyric = None
self.song_id = None
self.song_name = None
self.song_title = None
self.song_subtitle = None
self.info = None
self.image = None
self.comment_total = None
self.comment_page_size = None
self.hot_comment = None
@property
def url(self):
"""
        Page URL of the song on the QQ Music web site
:return:
"""
return 'https://y.qq.com/n/yqq/song/{}.html'.format(self.mid)
@property
def song_url(self):
"""
        Playback URL of the song; a fresh one is generated on each access
:return:
"""
return 'http://dl.stream.qqmusic.qq.com/{}?vkey={}&guid={}&fromtag=30'.format(self.filename, self._get_vkey(),
self.guid)
@property
def lyric_url(self):
return 'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_new.fcg?g_tk=753738303&songmid=' + self.mid
def get_lyric(self):
"""
        Get the lyrics and the translation (if any)
:return: { lyric: ..., trans: ...}
"""
lrc_url = self.lyric_url
headers = {
'Referer': 'https://y.qq.com/portal/player.html',
            'Cookie': 'skey=@LVJPZmJUX; p',  # g_tk above is derived from skey, so a valid skey must be sent for the request to succeed
            # this skey has already been logged out, so it poses no security risk
}
resp = requests.get(lrc_url, headers=headers)
lrc_dict = json.loads(resp.text[18:-1])
data = {'lyric': '', 'trans': ''}
if lrc_dict.get('lyric'):
data['lyric'] = base64.b64decode(lrc_dict['lyric']).decode()
if lrc_dict.get('trans'):
data['trans'] = base64.b64decode(lrc_dict['trans']).decode()
self.lyric = data
return self.lyric
def _get_vkey(self):
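        """
        Request a single-use vkey for this guid/filename pair;
        song_url uses it to build a playable stream link.
        """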
url = 'https://c.y.qq.com/base/fcgi-bin/fcg_music_express_mobile3.fcg'
params = {
'format': 'json',
'platform': 'yqq',
'cid': '205361747',
'songmid': self.mid,
'filename': self.filename,
'guid': self.guid
}
rst = requests.get(url, params=params)
return json.loads(rst.text)['data']['items'][0]['vkey']
def extract(self):
self.get_lyric()
self._get_song_info()
self._get_hot_comment()
def _get_song_info(self):
"""
        Scrape song info from the song's web page
:return:
"""
url = 'https://y.qq.com/n/yqq/song/{}.html'.format(self.mid)
resp = requests.get(url)
song_data = json.loads(re.search(r'g_SongData = .*};', resp.text).group()[13:-1])
self.song_id = song_data['songid']
self.song_subtitle = song_data['songsubtitle']
self.song_name = song_data['songname']
self.song_title = song_data['songtitle']
if not self.title:
self.title = self.song_title
info_data = json.loads(re.search(r'info :.*}}', resp.text).group()[7:])
self.info = info_data
soup = BeautifulSoup(resp.text, 'html.parser')
self.image = 'https:' + soup.find(class_='data__photo')['src']
def _get_hot_comment(self):
"""
        Fetch the hot comments and the total comment count
:return:
"""
url = 'https://c.y.qq.com/base/fcgi-bin/fcg_global_comment_h5.fcg'
params = {
'format': 'json',
'reqtype': '2',
'biztype': '1',
'topid': self.song_id,
'cmd': '8',
'pagenum': '0',
'pagesize': '1'
}
resp = requests.get(url, params=params)
data = json.loads(resp.text)
self.comment_total = data['comment']['commenttotal']
self.hot_comment = data['hot_comment']['commentlist']
self.comment_page_size = math.ceil(self.comment_total / 25)
def comment_page(self, page=1):
"""
        Fetch one page of comments
:param page:
:return:
"""
url = 'https://c.y.qq.com/base/fcgi-bin/fcg_global_comment_h5.fcg'
params = {
'format': 'json',
'reqtype': '2',
'biztype': '1',
'topid': self.song_id,
'cmd': '8',
'pagenum': page - 1,
'pagesize': '25'
}
resp = requests.get(url, params=params)
data = json.loads(resp.text)
return data['comment']['commentlist']
| UTF-8 | Python | false | false | 5,228 | py | 8 | song.py | 7 | 0.516194 | 0.495602 | 0 | 160 | 30.2625 | 118 |
CLSPhila/RecordLib | 3,547,643,039,848 | 3d9564c56fb4401fa13e38df192bd84c9e5d68b5 | 7c7f6571373779bffd934c6c8c3441335b7b7319 | /cleanslate/migrations/0008_auto_20200818_1943.py | c29d6e951376e1769ac91b9f79dc4b86fbf61782 | [] | no_license | https://github.com/CLSPhila/RecordLib | efffcf002b369e0beab83ab9ea4ffc9693a28a2c | 3b870fc9026c180455d9953a87e903725de3415d | refs/heads/main | 2021-07-22T04:02:30.271885 | 2021-07-07T20:37:01 | 2021-07-07T20:37:01 | 191,973,008 | 8 | 7 | null | false | 2021-07-22T14:50:27 | 2019-06-14T16:13:29 | 2021-07-07T20:37:10 | 2021-07-22T14:50:26 | 3,982 | 7 | 7 | 6 | Python | false | false | # Generated by Django 2.2.13 on 2020-08-18 19:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cleanslate', '0007_sourcerecord_parse_status'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='default_atty_address',
field=models.CharField(default='', max_length=200),
),
migrations.AddField(
model_name='userprofile',
name='default_atty_name',
field=models.CharField(default='', max_length=200),
),
migrations.AddField(
model_name='userprofile',
name='default_atty_organization',
field=models.CharField(default='', max_length=200),
),
migrations.AddField(
model_name='userprofile',
name='default_atty_phone',
field=models.CharField(default='', max_length=50),
),
migrations.AddField(
model_name='userprofile',
name='default_bar_id',
field=models.CharField(default='', max_length=50),
),
]
| UTF-8 | Python | false | false | 1,151 | py | 280 | 0008_auto_20200818_1943.py | 231 | 0.562989 | 0.534318 | 0 | 38 | 29.289474 | 63 |
thorcc/Programmeringskurs-Sandvika | 15,607,911,164,926 | 4d9328fac15cef7cb9f6f057f7d049d343219b8b | 6f07e3222e3f7810a302ce9a54f266013109f827 | /diverse/gpx-to-csv.py | 84767a982fe90eea056a65ae081b2a207aef460b | [] | no_license | https://github.com/thorcc/Programmeringskurs-Sandvika | 4539657109204e83a205e40608ff8a6fa71c17a6 | b710a9aa2b3f151648ed55ecb1fe8de64e4cbb4a | refs/heads/master | 2023-08-30T16:10:44.894358 | 2021-11-17T11:10:36 | 2021-11-17T11:10:36 | 427,966,322 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Program som konverterer gpx-filer til csv-filer
# Programmet krever pakken gpx_converter
# Åpne terminalen/CMD og kjør kommandoen: pip install -U gpx-converter
from gpx_converter import Converter
from datetime import datetime
input = "kolsaastoppen_med_Eirik.gpx"
output = "kolsaastoppen_med_Eirik.csv"
Converter(input_file=input).gpx_to_csv(output_file=output)
# The rest of the code converts the timestamps from datetime to seconds
f = open(output,"r")
lines = f.readlines()
starttime = datetime.strptime(lines[1].split(",")[0][:19], "%Y-%m-%d %H:%M:%S")
f = open(output, "w")
f.write(lines[0])
for line in lines[1::]:
newline = line.split(",")
newtime = datetime.strptime(newline[0][:19], "%Y-%m-%d %H:%M:%S")
newline[0] = str(int((newtime - starttime).total_seconds()))
f.write(",".join(newline))
| UTF-8 | Python | false | false | 811 | py | 13 | gpx-to-csv.py | 4 | 0.697157 | 0.684796 | 0 | 24 | 32.708333 | 79 |
gdennany/BlockChainProgramming | 1,975,684,989,018 | 59565a9cd2c97674c1af0797de4d9183925689dd | e0709305da506cc2377f4980dbfecda71ed84b61 | /BlockChain/Transaction.py | 9d4c484ac17f90b077609e9e0c1385c40c0f5458 | [] | no_license | https://github.com/gdennany/BlockChainProgramming | a4c69eed522514b9689e80861dc883d5e11d48d9 | a112e7998316be6f0206d13e904e3a20b1f0ac24 | refs/heads/master | 2021-04-22T02:40:36.989679 | 2020-04-05T21:07:21 | 2020-04-05T21:07:21 | 249,844,552 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import Signatures
#Tx abbreviates transaction
class Tx:
inputs = None #list of input address
outputs = None #list of output addresses and amounts
sigs = None #list of signatures
reqd = None #list of required signatures that are not inputs (to facilitate escrow transactions)
def __init__(self):
self.inputs = []
self.outputs = []
self.sigs = []
self.reqd = []
def add_input(self, from_addr, amount):
self.inputs.append((from_addr, amount))
def add_output(self, to_addr, amount):
self.outputs.append((to_addr, amount))
def add_reqd(self, addr):
self.reqd.append(addr)
def sign(self, privateKey):
message = self.__gather() # __ indicates this is a private member function
newSig = Signatures.sign(message, privateKey)
self.sigs.append(newSig)
def is_valid(self):
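        # valid iff every input address and every required (escrow) address
        # has a signature over the gathered (inputs, outputs, reqd) data and
        # no amount is negative; outputs may exceed inputs (mining rewards)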
totalIn = 0
totalOut = 0
message = self.__gather()
for addr, amount in self.inputs:
found = False
for s in self.sigs:
if Signatures.verify(message, s, addr):
found = True
if not found:
#print("No good signature found for " + str(message))
return False
if amount < 0:
return False
totalIn = totalIn + amount
for addr in self.reqd:
found = False
for s in self.sigs:
if Signatures.verify(message, s, addr):
found = True
if not found:
return False
for addr, amount in self.outputs:
if amount < 0:
return False
totalOut = totalOut + amount
        #if totalOut > totalIn:
        #    print("Outputs exceed inputs")
        #    return False
return True
def __gather(self):
data = []
data.append(self.inputs)
data.append(self.outputs)
data.append(self.reqd)
return data
def __repr__(self):
reprstr = "Inputs:\n"
for addr, amount in self.inputs:
reprstr = reprstr + str(amount) + " from " + str(addr) + "\n"
reprstr = reprstr + "Outputs:\n"
for addr, amount in self.outputs:
reprstr = reprstr + str(amount) + " to " + str(addr) + "\n"
reprstr = reprstr + "Required:\n"
for x in self.reqd:
reprstr = reprstr + str(x) + "\n"
reprstr = reprstr + "Signatures:\n"
for s in self.sigs:
reprstr = reprstr + str(s) + "\n"
reprstr = reprstr + "End\n"
return reprstr
if __name__ == '__main__':
priv1, publ1 = Signatures.generate_keys()
priv2, publ2 = Signatures.generate_keys()
priv3, publ3 = Signatures.generate_keys()
priv4, publ4 = Signatures.generate_keys()
#Testing valid transactions
    #Transaction 1: user 1 sends one coin from his public key (publ1) to the public key of user 2 (publ2) and signs the transaction
Tx1 = Tx()
Tx1.add_input(publ1, 1)
Tx1.add_output(publ2, 1)
Tx1.sign(priv1)
Tx2 = Tx()
Tx2.add_input(publ1, 2)
Tx2.add_output(publ2, .5)
Tx2.add_output(publ2, .5)
Tx2.add_output(publ3, 1)
Tx2.sign(priv1)
Tx3 = Tx()
Tx3.add_input(publ3, 1.2)
Tx3.add_output(publ1, 1.1)
Tx3.add_reqd(publ4) #test escrow transactions
Tx3.sign(priv3)
    Tx3.sign(priv4) #third party must also sign in escrow transactions
print()
for t in [Tx1, Tx2, Tx3]:
if t.is_valid():
print("Successful transaction")
else:
print("Error: Failed transaction")
#Testing invalid transactions
#Test invalid signature
Tx4 = Tx()
Tx4.add_input(publ1, 1)
Tx4.add_output(publ2, 1)
Tx4.sign(priv2)
#Test escrow transaction not signed by the third party (should fail)
Tx5 = Tx()
Tx5.add_input(publ3, 1.2)
Tx5.add_output(publ1, 1.1)
Tx5.add_reqd(publ4) #test escrow transactions
Tx5.sign(priv3)
    #Tx5.sign(priv4) #third party doesn't sign => is invalid
#Test two input addresses, but only one signs it
Tx6 = Tx()
Tx6.add_input(publ3, 1)
Tx6.add_input(publ4, .1)
Tx6.add_output(publ1, 1.1)
Tx6.sign(priv3) #only one person signs => should be invalid
#Test Outputs exceeding the input
Tx7 = Tx()
Tx7.add_input(publ4, 1.2)
Tx7.add_output(publ1, 1)
Tx7.add_output(publ2, 2)
Tx7.sign(priv4)
#Test negative values
Tx8 = Tx()
Tx8.add_input(publ2, -1)
Tx8.add_output(publ1, -1)
Tx8.sign(priv2)
#A transaction that has been modified
Tx9 = Tx()
Tx9.add_input(publ1, 1)
Tx9.add_output(publ2, 1)
Tx9.sign(priv1)
Tx9.outputs[0] = (publ3, 1)
for t in [Tx4, Tx5, Tx6, Tx7, Tx8, Tx9]:
if t.is_valid():
print("Error: Bad transaction passed")
else:
print("Success: Bad Transaction detected")
| UTF-8 | Python | false | false | 5,064 | py | 12 | Transaction.py | 12 | 0.561216 | 0.534163 | 0 | 171 | 28.573099 | 131 |
kakulukia/schlampenadmin | 15,522,011,808,931 | 3278451da3ee086c015d832fabfacce8025cb563 | d372a147f8c715cec041dbe51912983f1b9c1675 | /swing_admin/migrations/0002_auto_20170828_1510.py | ed1d83c5cdc2a3a738f9d3a74104fd9fc1566f01 | [] | no_license | https://github.com/kakulukia/schlampenadmin | b521fa617cd714bcab24ded8b8832fd9373cb598 | 6586237222a7a7a10dac159b40563e51433e27cd | refs/heads/master | 2021-12-28T19:04:41.764966 | 2021-04-09T09:20:20 | 2021-04-09T09:20:20 | 41,327,236 | 1 | 0 | null | false | 2021-09-10T17:58:29 | 2015-08-24T21:10:48 | 2021-04-09T09:20:23 | 2021-09-10T17:58:28 | 352 | 1 | 0 | 5 | JavaScript | false | false | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-28 15:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('swing_admin', '0001_squashed_0006_remove_news_image'),
]
operations = [
migrations.AlterModelOptions(
name='dates',
options={'verbose_name': 'Date', 'verbose_name_plural': 'Dates'},
),
migrations.AlterModelOptions(
name='news',
options={'verbose_name': 'News', 'verbose_name_plural': 'News'},
),
]
| UTF-8 | Python | false | false | 611 | py | 17 | 0002_auto_20170828_1510.py | 10 | 0.585925 | 0.545008 | 0 | 23 | 25.565217 | 77 |
unixorn/themis-lambda | 4,063,039,075,953 | 3dc5372738ed093e2a348224f357078b5f7491e9 | 3a8b40f704695b68b546884db7aa3c16d4608fc3 | /themis_lambda.py | 2b3150d812c2dcd09c308e1edc29e6792f272457 | [
"Apache-2.0"
] | permissive | https://github.com/unixorn/themis-lambda | 893a879a715f6f83e504ed808e9949289dc96aa0 | f69eb0c8285cdab13afa78e9a6eac176464ae411 | refs/heads/master | 2021-10-12T02:36:08.921032 | 2019-01-31T19:17:20 | 2019-01-31T19:17:20 | 105,590,188 | 1 | 0 | Apache-2.0 | false | 2019-01-31T19:17:21 | 2017-10-02T22:07:47 | 2018-01-16T15:33:03 | 2019-01-31T19:17:20 | 15 | 1 | 0 | 0 | Python | false | null | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Joe Block <jpb@unixorn.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
themis-lambda is a tool to scan autoscaling groups and determine which
instances are busy and which are not, then apply instance protection to
the busy instances so they won't be killed during scale-down events.
Sample Trigger Event
{
"asgName": "Electric-Horse-Ziggurat",
"metricsPort": 9000,
"busyURL": "/work_status",
"busyValue": "BUSY",
"idleValue": "IDLE"
}
'''
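# Local smoke test (hypothetical ASG name; needs AWS credentials configured):
#   handler({'asgName': 'Electric-Horse-Ziggurat', 'dryRun': True}, None)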
import sys
import urllib2
import boto3
from logrus.utils import getCustomLogger
# this is a pointer to the module object instance itself. We'll attach
# a logger to it later.
this = sys.modules[__name__]
def getASGInstances(asgID=None, client=None, NextToken=None, MaxRecords=10):
'''
Get the members of an autoscaling group
:param str asgID: What Autoscaling group to list
:param int MaxRecords: How many instances to look at at a time
:param boto3client client: boto3 autoscaling client
:param boto3 pagination token NextToken: token for the next page of results
'''
assert isinstance(asgID, basestring), ("asgID must be a string but is %r." % asgID)
response = None
if NextToken:
response = client.describe_auto_scaling_instances(MaxRecords=MaxRecords, NextToken=NextToken)
else:
response = client.describe_auto_scaling_instances(MaxRecords=MaxRecords)
for i in response['AutoScalingInstances']:
if i['AutoScalingGroupName'] == asgID:
yield i['InstanceId']
if 'NextToken' in response:
for i in getASGInstances(client=client, asgID=asgID, NextToken=response['NextToken']):
yield i
def setASGInstanceProtection(asgName=None, client=None, instances=None, protected=True, dryRun=False):
'''
Set instance protection for instances in instanceList so that they are
not terminated during a scale-down event in the ASG
:param str asgName: Autoscaling group to affect
:param list instances: list of instance IDs to change protection status for
:param bool protected: What to set the instance protection to
'''
assert isinstance(asgName, basestring), ("asgName must be a basestring but is %r." % asgName)
assert isinstance(dryRun, bool), ("dryRun must be a bool but is %r." % dryRun)
assert isinstance(instances, list), ("instances must be a list but is %r." % instances)
assert isinstance(protected, bool), ("protected must be a bool but is %r." % protected)
this.logger.info('Setting %s instance protection to %s', instances, protected)
if dryRun:
this.logger.info('dry run - not altering instance protection')
return None
else:
response = client.set_instance_protection(InstanceIds=instances,
AutoScalingGroupName=asgName,
ProtectedFromScaleIn=protected)
return response
def getPrivateIP(client=None, instanceID=None):
'''
Return the private IP of an instance
'''
assert isinstance(instanceID, basestring), ("instanceID must be a basestring but is %r." % instanceID)
instanceData = client.describe_instances(InstanceIds=[instanceID])
return instanceData['Reservations'][0]['Instances'][0]['PrivateIpAddress']
def getInstanceWorkStatuses(client=None,
instances=None,
busyURL='/work_status',
metricsPort=9000,
busyValue='BUSY',
idleValue='IDLE'):
'''
Check instance work status
'''
assert isinstance(busyURL, basestring), ("busyURL must be a basestring but is %r." % busyURL)
assert isinstance(busyValue, basestring), ("busyValue must be a basestring but is %r." % busyValue)
assert isinstance(idleValue, basestring), ("idleValue must be a basestring but is %r." % idleValue)
assert isinstance(instances, list), ("instances must be a list but is %r." % instances)
assert isinstance(metricsPort, int), ("metricsPort must be an int but is %r." % metricsPort)
statuses = {}
statuses['busy'] = {}
statuses['idle'] = {}
statuses['error'] = {}
this.logger.info('Checking instances %s', list(instances))
for i in instances:
this.logger.info('Checking %s', i)
privateIP = getPrivateIP(client=client, instanceID=i)
this.logger.info('%s has IP %s, checking busy status', i, privateIP)
try:
statusURL = "http://%s:%s/%s" % (privateIP, metricsPort, busyURL)
this.logger.info('Checking %s for instance status', statusURL)
probe = urllib2.urlopen(statusURL)
workStatus = probe.read().lower().strip()
except urllib2.URLError as e:
workStatus = e.reason
this.logger.warning(workStatus)
statuses['error'][i] = privateIP
this.logger.info('status: %s', workStatus)
if workStatus == busyValue.lower().strip():
this.logger.info('adding %s to busy list', i)
statuses['busy'][i] = privateIP
elif workStatus == idleValue.lower().strip():
this.logger.info('adding %s to idle list', i)
statuses['idle'][i] = privateIP
else:
this.logger.warning('%s is not reporting a valid work state', i)
statuses['error'][i] = privateIP
return statuses
def processASG(asgName=None,
region=None,
busyURL=None,
metricsPort=9000,
busyValue='BUSY',
idleValue='IDLE',
dryRun=False):
'''
Process an ASG and return a dict describing the busy statuses of the
instances in the ASG.
:param str asgName: Auto Scaling Group to process
:param str busyURL: What url to probe on the instances in the ASG
:param int metricsPort: What port for the http server reporting the busy status
:param str busyValue: What busy instances will return. Default 'BUSY'
:param str idleValue: What idle instances will return. Default 'IDLE'
:param bool dryRun: whether or not to change instances instance protection
'''
assert isinstance(asgName, basestring), ("asgName must be a basestring but is %r." % asgName)
assert isinstance(busyURL, basestring), ("busyURL must be a basestring but is %r." % busyURL)
assert isinstance(busyValue, basestring), ("busyValue must be a basestring but is %r." % busyValue)
assert isinstance(dryRun, bool), ("dryRun must be a bool but is %r." % dryRun)
assert isinstance(idleValue, basestring), ("idleValue must be a basestring but is %r." % idleValue)
assert isinstance(metricsPort, int), ("metricsPort must be a int but is %r." % metricsPort)
assert isinstance(region, basestring), ("region must be a basestring but is %r." % region)
if dryRun:
this.logger.warning('Activating dry-run mode')
# Set up boto3 connections
asgClient = boto3.client('autoscaling', region_name=region)
ec2client = boto3.client('ec2', region_name=region)
instances = list(getASGInstances(asgID=asgName, client=asgClient, MaxRecords=50))
this.logger.info('ASG %s members: %s', asgName, instances)
this.logger.info('Checking which members are busy...')
asgInstanceStatuses = getInstanceWorkStatuses(client=ec2client,
busyURL=busyURL,
busyValue=busyValue,
idleValue=idleValue,
metricsPort=metricsPort,
instances=list(instances))
this.logger.info('Statuses: %s', asgInstanceStatuses)
this.logger.info('Applying instance protection')
if len(asgInstanceStatuses['busy'].keys()) > 0:
setASGInstanceProtection(client=asgClient,
asgName=asgName,
instances=asgInstanceStatuses['busy'].keys(),
dryRun=dryRun,
protected=True)
else:
this.logger.info('No instances reporting busy status')
if len(asgInstanceStatuses['idle'].keys()) > 0:
setASGInstanceProtection(client=asgClient,
asgName=asgName,
instances=asgInstanceStatuses['idle'].keys(),
dryRun=dryRun,
protected=False)
else:
this.logger.info('No instances reporting idle status')
if len(asgInstanceStatuses['error'].keys()) > 0:
this.logger.warning('The following instances did not report a status and are not going to be touched:')
this.logger.warning(asgInstanceStatuses['error'])
else:
this.logger.info('No problems checking instance idle status')
return asgInstanceStatuses
def handler(event, context):
'''
Handle incoming events from AWS
'''
asgName = event.get('asgName')
busyURL = event.get('busyURL')
busyValue = event.get('busyValue')
dryRun = event.get('dryRun')
idleValue = event.get('idleValue')
logLevel = event.get('logLevel')
logName = event.get('logName')
metricsPort = event.get('metricsPort')
region = event.get('region')
# Sanity check and default setting
if not asgName:
raise ValueError, 'You must specify an asgName'
else:
print 'asgName: ' + asgName
# Set up logging
if not logLevel:
logLevel = 'INFO'
if not logName:
logName = 'themis'
logLevel = logLevel.upper()
this.logger = getCustomLogger(name=logName, logLevel=logLevel)
this.logger.debug('Setting log level to %s', logLevel)
this.logger.info('Processing %s', asgName)
# Peel settings out of incoming event
if not dryRun:
dryRun = False
if not busyURL:
busyURL = '/work_status'
this.logger.info('Using default busyURL %s', busyURL)
if not busyValue:
busyValue = 'BUSY'
this.logger.info('Using default busyValue %s', busyValue)
if not idleValue:
idleValue = 'IDLE'
this.logger.info('Using default idleValue %s', idleValue)
if not metricsPort:
# Use the standard Apgar port
metricsPort = 9000
this.logger.info('Using default metricsPort %s', metricsPort)
if not region:
region = 'us-west-2'
this.logger.info('Using default region %s', region)
this.logger.debug('asgName: %s', asgName)
this.logger.debug('region: %s', region)
this.logger.debug('busyURL: %s', busyURL)
this.logger.debug('busyValue: %s', busyValue)
this.logger.debug('idleValue: %s', idleValue)
this.logger.debug('metricsPort: %s', metricsPort)
return processASG(asgName=asgName,
region=region,
busyURL=busyURL,
metricsPort=metricsPort,
busyValue=busyValue,
idleValue=idleValue,
dryRun=dryRun)
| UTF-8 | Python | false | false | 11,185 | py | 4 | themis_lambda.py | 1 | 0.663657 | 0.658829 | 0 | 292 | 37.304795 | 107 |
daggy1234/oauthcord | 10,496,900,101,306 | ba6c4ec498e4d6041d296a68db44f5799ba74759 | 50615491f27a33455f7f67ca1e9cc00e2958115b | /oauthcord/application.py | 0f0ec872945b6f4421faab090e9f9168992310cf | [
"MIT"
] | permissive | https://github.com/daggy1234/oauthcord | 77bb73f8b0ba909cabc866cbb827be08dcf12959 | baf919e6008dfccf90bcb82d01f36a3b89467680 | refs/heads/master | 2023-06-24T21:39:14.321436 | 2019-09-17T12:16:23 | 2019-09-17T12:16:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Application(object):
"""
Much like a user object, but for the current application
"""
__slots__ = ("id", "name", "icon", "description", "rpc_origins", "bot_public", "bot_require_code_grant", "owner")
def __init__(self, dict):
# Take out everything that we inherited from the GET /oauth2/applications/@me
# Application info stuff
self.id = dict.get("id")
self.name = dict.get("name")
sef.icon = dict.get("icon")
self.description = dict.get("description")
# Rpc
self.rpc_origins = dict.get("rpc_origins")
# Bot
self.bot_public = dict.get("bot_public")
self.bot_require_code_grant = dict.get("bot_require_code_grant")
# Owner
self.owner = dict.get("owner") | UTF-8 | Python | false | false | 788 | py | 7 | application.py | 6 | 0.583756 | 0.582487 | 0 | 24 | 31.875 | 117 |
7u83/maxdb-buildtools | 6,536,940,270,656 | 48ea8885b353074b69c41c57407a743939c7b9f3 | e987cd566edc75997f9b02377514d4f3a0dba12c | /sys/src/Python/vmake/dependencies.py | e8913a17664fa363f757be05cfa22353f30dfa4c | [] | no_license | https://github.com/7u83/maxdb-buildtools | f942adff2cd55d0a046b6ef3e18f6645b011a26e | ce9a56943f6195d6755e983035aa96cbe95e6cb2 | refs/heads/master | 2020-05-04T18:23:30.849371 | 2015-02-15T19:25:49 | 2015-02-15T19:25:49 | 30,428,297 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#
# ========== licence begin GPL
# Copyright (C) 2001 SAP AG
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ========== licence end
#
import os
import time
import cPickle
import vmakeLib
_daySecondsC = 24 * 60 * 60  # getmtime and time.time both work in seconds
_speedMapping = {
's': 'slow',
'q': 'quick',
'f': 'fast',
}
class VmakeDependencies:
def __init__ (self, target = 'all', speed = 's'):
self.loadVmakeData (target, speed)
mainDependencies = DependenciesSet ()
includeDependencies = DependenciesSet ()
def getCachePath (self, speed):
return (os.environ ['OWN'] + '/sys/wrk/' + _speedMapping [speed]
+ '/all.vmake.pycache')
def loadVmakeData (self, target, speed):
cachePath = self.getCachePath (speed)
dataLoaded = None
if os.path.exists (cachePath):
modified = os.path.getmtime (cachePath)
now = time.time ()
            if now - modified < _daySecondsC:
self.all = cPickle.load (open (cachePath, 'rb'))
dataLoaded = 1
if not dataLoaded:
self.callVmake (speed)
cPickle.dump (self.all, open (cachePath, 'wb'), 1)
def callVmake (self, speed):
self.all = {}
parser = vmakeLib.VmakeParser ()
parser.registerHandler (None, self.targetEvent)
parser.parseCollection (['all', 'allknl'], speed)
def targetEvent (self, target):
if target.version == '':
target.version = vmakeLib.independentVersion
self.all [(target.name, target.version)] = target
def dependentsOf (self, headerList, targetList = None):
queue = OnceQueue ()
for headerName in headerList:
try:
target = self.all [(headerName, vmakeLib.independentVersion)]
except KeyError:
keys = self.all.keys ()
keys.sort ()
for key in keys:
name = key [0]
if name == headerName:
print key
raise
queue.add (target, target.asKey ())
modules = {}
while not queue.isEmpty ():
target = queue.next ()
if not hasattr (target, 'callers'):
continue
for module in target.callers:
subtarget = self.all [module]
if subtarget.kind == 'module':
modules [module] = 1
queue.add (subtarget, subtarget.asKey ())
if targetList:
targetModules = self.modulesOf (targetList)
result = []
for module in modules.keys ():
if targetModules.has_key (module):
result.append (module)
else:
result = modules.keys ()
return result
def modulesOf (self, targetList):
queue = OnceQueue ()
targetList = map (macname, targetList)
for targetName in targetList:
for speed in "sqf ":
target = None
try:
target = self.all [(targetName, speed)]
queue.add (target, target.asKey ())
except KeyError:
pass
if target != None:
continue
modules = {}
while not queue.isEmpty ():
target = queue.next ()
if not hasattr (target, 'dependencies'):
continue
for module in target.dependencies:
try:
subtarget = self.all [module]
except KeyError:
continue
if subtarget.kind == 'module':
modules [module] = 1
queue.add (subtarget, subtarget.asKey ())
return modules
def macname (name):
if name [-4:] != '.mac':
name = name + '.mac'
return name
class DependenciesSet:
def __init__ (self):
        self.dict = {}
def add (self, base, newDep):
try:
dict = self.dict [base]
except KeyError:
dict = {}
self.dict [base] = dict
dict [newDep] = 1
def dependenciesOf (self, base):
pass
class OnceQueue:
def __init__ (self, seen = None):
self.seen = {}
self.queue = []
def add (self, item, name):
if not self.seen.has_key (name):
self.queue.append (item)
self.seen [name] = 1
def isEmpty (self):
return len (self.queue) == 0
def next (self):
result = self.queue [0]
del self.queue [0]
return result
| UTF-8 | Python | false | false | 5,339 | py | 1,027 | dependencies.py | 234 | 0.533808 | 0.526316 | 0 | 169 | 30.568047 | 80 |
Xuan4dream/Leetcode | 549,755,862,177 | 09133b1602353984776d448d7106d77accc6f50e | 0ed666fb3219f919a5f1549609340ac0405ac5f0 | /M_1041. Robot Bounded In Circle.py | 58fce6bcea5d30da255bdd59aeae47e5531af5e5 | [] | no_license | https://github.com/Xuan4dream/Leetcode | 086cdbbbe53a8a7f287de57e02e31cca2ed6db05 | 013c16574fd6082ae80aa074f1d9579a077261b7 | refs/heads/main | 2023-04-15T00:50:03.699455 | 2021-04-30T04:00:51 | 2021-04-30T04:00:51 | 346,591,422 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[44]:
class Solution(object):
def isRobotBounded(self, instructions):
"""
:type instructions: str
:rtype: bool
"""
# refered to solution:
# 0- north, 1- east, 2-south, 3-west
directions = [[0, 1], [1, 0], [0, -1], [-1, 0]]
# inital position
x = y = 0
# facing north
idx = 0
for i in instructions:
if i == "L":
idx = (idx +3)%4
elif i == "R":
idx = (idx +1)%4
else:
x += directions[idx][0]
y += directions[idx][1]
        # after one pass: if it returns to the initial position
        # or doesn't face north, then it is bounded
return (x == 0 and y == 0) or idx != 0
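        # e.g. "GL": one pass ends at (0, 1) facing west (idx == 3 != 0),
        # so repeating the instructions traces a bounded circle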
# # 04262021 First try with hints
# self.loc = [0, 0]
# self.direction = [0, 1]
# for ins in instructions:
# self.move(ins)
# if self.direction != [0, 1] or self.loc == [0, 0]:
# return True
# else:
# return False
# def move(self, ins):
# left_dir = [[0, 1], [-1, 0], [0, -1], [1, 0], [0, 1]]
# right_dir = [[0, 1], [1, 0], [0, -1], [-1, 0], [0, 1]]
# if ins == "G":
# self.loc = [self.loc[i]+self.direction[i] for i in (0, 1)]
# elif ins == "L":
# self.direction = left_dir[left_dir.index(self.direction) + 1]
# else:
# self.direction = right_dir[right_dir.index(self.direction) + 1]
# # Time: O(N)
# # Space: O(1)
| UTF-8 | Python | false | false | 1,683 | py | 30 | M_1041. Robot Bounded In Circle.py | 30 | 0.423648 | 0.383838 | 0 | 58 | 28 | 77 |
Yizhou-Yang/tagger | 13,091,060,320,963 | a462ee8f5f049cae754561c0d7cd7fe36aad6801 | 65ca2bca6c9ce0262c70f026a28826f98a845473 | /tagger.py | aecefa1ac7c8b7b883ccd5c7c9c18d6e27622687 | [] | no_license | https://github.com/Yizhou-Yang/tagger | 73c1297ebfd73c451ca95ee6d0fbac7c5d6a66b2 | c37a16c29d9e632bfc8941d5d1bb681945187c19 | refs/heads/main | 2023-05-08T05:41:13.271237 | 2021-06-01T19:41:57 | 2021-06-01T19:41:57 | 372,942,162 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Currently reads in the names of the training files, test file and output file,
# and calls the tagger (which you need to implement)
import os
import sys
import re
import math
import numpy as np
#this is the vanilla Viterbi algorithm taught in lecture
#take log for the probability before normalizing, and preload all probabilities with 0.01 to avoid all-zero columns.
#this implementation currently does not use numpy... so it has efficiency issues. But it runs within a minute on my machine, so hopefully it isn't too slow...
#reads sentences, and does tagging for every .!;? (which must be PUN) to mitigate some efficiency issues.
#preloads some symbols that are repeatedly misclassified
#has an accuracy of around 80%... a bit far from 90%, but this is the best I can do (already late)
#A is transition matrix, B is emission matrix,P is the initial probability
#assume there are no more than 200000 distinct words in both training and test.
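#the update in v_sentence() below is the standard Viterbi recurrence, in this
#file's names: prob_trellis[s][t] = max over x of
#  prob_trellis[x][t-1] * A[x][s] * B[s][obs_t]
#where A and B are row-normalized counts and each trellis column is renormalized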
taglist = ["AJ0","AJC","AJS","AT0","AV0","AVP","AVQ","CJC","CJS","CJT","CRD","DPS","DT0","DTQ","EX0","ITJ","NN0","NN1","NN2","NP0","ORD","PNI","PNP","PNQ","PNX","POS","PRF","PRP","PUL","PUN","PUQ","PUR","TO0","UNC","VBB","VBD","VBG","VBI","VBN","VBZ","VDB","VDD","VDG","VDI","VDN","VDZ","VHB","VHD","VHG","VHI","VHN","VHZ","VM0","VVB","VVD","VVG","VVI","VVN","VVZ","XX0","ZZ0","AJ0-NN1","AJ0-VVD","AJ0-VVG","AJ0-VVN","AV0-AJ0","AVP-PRP","AVQ-CJS","CJS-AVQ","CJS-PRP","CJT-DT0","CRD-PNI","DT0-CJT","NN1-AJ0","NN1-NP0","NN1-VVB","NN1-VVG","NN2-VVZ","NP0-NN1","PNI-CRD","PRP-AVP","PRP-CJS","VVB-NN1","VVD-AJ0","VVD-VVN","VVG-AJ0","VVG-NN1","VVN-AJ0","VVN-VVD","VVZ-NN2","AJ0-AV0"]
tagdict = {}
reversedict = {}
worddict = {}
#preloaded tags, to be built manually
preload = {"a":"AT0","that":"CJT","of":"PRF","at":"PRP","the":"AT0",",":"PUN","had":"VHD","there":"EX0","was":"VBD","he":"PNP","with":"PRP","to":"TO0","both":"AV0","all":"DT0","around":"AVP","on":"PRP","him":"PNP","for":"CJS","now":"AV0","make":"VVI","before":"AV0","about":"PRP","one":"CRD","it":"PNP"}
A = []
B = []
P = []
countP = 0
countA = []
countB = []
numwords = 0
prob_trellis = []
path_trellis = []
def train(training_file):
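    #accumulate raw counts: P (initial tags), A (tag->tag transitions),
    #B (tag->word emissions); clean() later divides them into probabilities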
global countP
global numwords
rd = open(training_file,"r",errors='replace')
outList = rd.readlines()
last = None
for line in outList:
split = line.split()
word = split[0]
tag = split[2]
if(len(split)>2):
split = line.split(" : ")
word = split[0]
tag = split[1].split()[0]
#update P
#print(tag)
i = tagdict.get(tag)
P[i]+= 1
countP+=1
#update A
if(last!=None):
A[tagdict.get(last)][tagdict.get(tag)]+=1
countA[tagdict.get(last)]+=1
last = tag
#update B
wordindex = worddict.get(word,-1)
if(wordindex==-1):
worddict.update({word:numwords})
wordindex = numwords
numwords+=1
countB[tagdict.get(tag)]+=1
B[tagdict.get(tag)][wordindex]+=1
#find the most probable word lists
#path_trellis is an array of numbers; translate it into word tags.
def findx(s,num,obs):
maxprob = 0
maxindex = -1
for x in range(len(taglist)):
prob = prob_trellis[x][num-1]*A[x][s]*B[s][obs]
if(prob>maxprob):
maxprob = prob
maxindex = x
return maxindex
def findmax(prob_trellis,index):
maxprob = 0
maxindex = 0
for x in range(len(taglist)):
prob = prob_trellis[x][index]
if(prob>=maxprob):
maxprob = prob
maxindex = x
return maxindex
#turn the raw counts in A, B, P into probabilities
def clean():
#total = []
for i in range(len(P)):
P[i] = P[i]/countP
for i in range(len(tagdict)):
sumB = 0
for j in range(len(tagdict)):
if(countA[i]==0):
A[i][j]=0.000001
continue
A[i][j] = A[i][j]/countA[i]
for j in range(len(worddict)):
if(countB[i]==0):
B[i][j]=0.000001
continue
B[i][j] = B[i][j]/countB[i]
#print(total)
def v_sentence(sentence,wr,punctuation):
default = worddict.get(sentence[0])
if default == None:
default = 19
for s in range(len(taglist)):
#print(P[s] * B[s][worddict.get(outList[0],-1)])
#print(counttag[s])
prob_trellis[s][0] = P[s] * B[s][default]
path_trellis[s][0] = [s]
#handle never-before-seen words
#print(prob_trellis[s][0])
#for s in range(len(taglist)):
# print(B[s][worddict.get(outList[0])])
# o is the item number, obs is the observation
for num in range(1,len(sentence)):
#if it is one of our preloads
obs = worddict.get(sentence[num])
if(obs==None):
obs = default
total = 0
for s in range(len(taglist)):
if preload.get(sentence[num-1])!=None:
tag = preload.get(sentence[num-1])
x = tagdict.get(tag)
else:
x = findx(s,num,obs)
if(x==-1):
print(sentence)
print(num)
exit()
#every round, not every state can be reached by some other state
prob_trellis[s][num] = prob_trellis[x][num-1]*A[x][s]*B[s][obs]
total += prob_trellis[s][num]
new_path = list(path_trellis[x][num-1])
new_path.append(s)
path_trellis[s][num] = new_path
        #normalize prob_trellis[s][num]
for s in range(len(taglist)):
prob_trellis[s][num] = prob_trellis[s][num]/total
#for s in range(len(taglist)):
# print(path_trellis[s][num])
maxnum = findmax(prob_trellis,len(sentence)-1)
writesentence(path_trellis[maxnum][len(sentence)-1],wr,sentence,punctuation)
def viberti(test_file,output_file):
global numwords
rd = open(test_file,"r")
wr = open(output_file,"w")
outList = rd.readlines()
#clean the outlist
for i in range(len(outList)):
outList[i] = outList[i][:len(outList[i])-1]
#print(outList)
sentence = []
for item in outList:
if item == '.' or item == '!' or item == '?' or item == ';':
v_sentence(sentence,wr,item)
sentence = []
else:
sentence.append(item)
#write the calculated word list to output
def writesentence(output,wr,sentence,punctuation):
for i in range(len(output)):
wr.write(sentence[i]+' : '+reversedict.get(output[i])+'\n')
wr.write(punctuation+' : '+'PUN'+'\n')
def word(training_list, test_file, output_file):
# Tag the words from the untagged input file and write them into the output file.
# Doesn't do much else beyond that yet.
print("Tagging the file.")
#
# YOUR IMPLEMENTATION GOES HERE
#
for i in range(len(taglist)):
tagdict.update({taglist[i]:i})
reversedict.update({i:taglist[i]})
for i in range(len(taglist)):
P.append(0.01)
countA.append(0.0)
countB.append(0.0)
temp = []
for j in range(len(taglist)):
temp.append(0.01)
A.append(temp)
temp = []
for j in range(200000):
temp.append(0.01)
        B.append(temp)
        # build a fresh row for the trellis: appending the same list used for
        # B would alias them, and trellis updates would then corrupt B
        prob_trellis.append([0.01 for j in range(200000)])
temp = []
for j in range(200000):
temp.append([])
path_trellis.append(temp)
#print(len(B))
#print(len(B[0]))
for training_file in training_list:
train(training_file)
clean()
viberti(test_file,output_file)
if __name__ == '__main__':
# Run the tagger function.
print("Starting the tagging process.")
# Tagger expects the input call: "python3 tagger.py -d <training files> -t <test file> -o <output file>"
parameters = sys.argv
training_list = parameters[parameters.index("-d")+1:parameters.index("-t")]
test_file = parameters[parameters.index("-t")+1]
output_file = parameters[parameters.index("-o")+1]
# print("Training files: " + str(training_list))
# print("Test file: " + test_file)
# print("Ouptut file: " + output_file)
# Start the training and tagging operation.
word (training_list, test_file, output_file) | UTF-8 | Python | false | false | 8,543 | py | 1 | tagger.py | 1 | 0.550977 | 0.533653 | 0 | 255 | 31.509804 | 677 |
joezuntz/des-tile-tools | 14,620,068,692,903 | 008c4468c8fc5bc92f6b46988990c50db60df4d5 | c83dea122415e8b60948e10f566169192559a1e5 | /tile_collections.py | 526fb10b1583be82579894b5dd375fd41cda0a69 | [] | no_license | https://github.com/joezuntz/des-tile-tools | 064244e7fae0a7f41df50fac1e7c73c98193db3c | 5a8b8630232735eeb895b366af687370dbf139a7 | refs/heads/master | 2021-01-19T11:31:08.572574 | 2016-06-10T15:24:06 | 2016-06-10T15:24:06 | 60,851,020 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #DES File tools
import re
import os
UNKNOWN_TILE = "unknown"
tile_pattern = re.compile(r'DES[0-9][0-9][0-9][0-9][+-][0-9][0-9][0-9][0-9]')
def find_tilename(name):
m = tile_pattern.search(name)
if m is None:
return UNKNOWN_TILE
return m.group()
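# e.g. find_tilename('foo/DES0123-4567_r.fits') -> 'DES0123-4567'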
class TileCollection(object):
"A directory with lots of files or subdirectories which have tiles in their names"
def __init__(self, path=None, files=None):
if files is not None:
self.files = files
elif path is not None:
self.files = self.find_files(path)
else:
raise ValueError("Must initialize a TileCollection with either path or files")
def find_files(self, path):
all_files = os.listdir(path)
files = {}
for filename in all_files:
tile = find_tilename(filename)
if tile==UNKNOWN_TILE:
continue
files[tile] = filename
return files
def __contains__(self, tile):
return tile in self.files
def files_with_path(self, path):
for tile, filename in self.files.items():
yield tile, os.path.join(path, filename)
def existing_files_with_path(self, path):
for tile, filename in self.files_with_path(path):
if os.path.exists(filename):
yield tile, filename
def inverse_filter(self, other):
files = {}
for tile, filename in self.files.items():
if tile not in other:
files[tile] = filename
return TileCollection(files=files)
def filter(self, other):
files = {}
for tile, filename in self.files.items():
if tile in other:
files[tile] = filename
return TileCollection(files=files)
| UTF-8 | Python | false | false | 1,787 | py | 2 | tile_collections.py | 2 | 0.584219 | 0.575266 | 0 | 59 | 29.288136 | 90 |
underminedsk/kaleidoscope | 11,501,922,451,383 | 5f352fb5e75cee8e02dbc7fcaa065cc256e10d62 | 0f0238a2c2210fcd797f32a3a724a97ac5294e45 | /light_puzzle/light_puzzle_demo.py | 3566d684f8d12c7d8ae24f4a572c998a87554263 | [] | no_license | https://github.com/underminedsk/kaleidoscope | 2a9a8a7ec7220a21afbcc933a9da9b4509ce3b35 | bffd72caf07479d020758abe7fa1d431657215d1 | refs/heads/master | 2020-04-06T07:02:21.902922 | 2016-08-21T00:43:02 | 2016-08-21T00:43:02 | 59,241,432 | 0 | 4 | null | false | 2016-06-14T06:23:39 | 2016-05-19T20:44:06 | 2016-05-20T23:46:36 | 2016-06-14T06:23:39 | 53 | 0 | 1 | 0 | Arduino | null | null | MAX_ALLOWED_MOVES = 9999
NUM_NODES = 3
STATES = ['RED', 'ORANGE', 'YELLOW']
#STATES = ['RED', 'ORANGE', 'YELLOW', 'GREEN', 'BLUE']
def initial_game_state():
#return [STATES[0] for i in range(0,NUM_NODES)]
return ['RED', 'ORANGE', 'YELLOW']
def get_user_input():
choice = raw_input('choose a node (1-%s)):' % (NUM_NODES))
try:
choice = int(choice)
if choice <=0 or choice > NUM_NODES:
raise ValueError()
else:
return choice
except:
print 'ERROR: enter a number between 1 and %s' % NUM_NODES
return None
def next_node_state(state):
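    # advance one color through STATES, wrapping the last back to the first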
idx = STATES.index(state)+1
return STATES[idx] if idx < len(STATES) else STATES[0]
def next_game_state(current_game_state, user_choice):
next_state = []
for node_idx in range(0, NUM_NODES):
cur_node_state = current_game_state[node_idx]
if node_idx+1 != user_choice:
next_state.append(next_node_state(cur_node_state))
else:
next_state.append(cur_node_state)
return next_state
def puzzle_solved(current_game_state):
for node_state in current_game_state:
if node_state != STATES[-1]:
return False
return True
if __name__ == '__main__':
print 'rules: (1) all nodes start as %s' % STATES[0]
    print ' (2) to win, make all the nodes %s' % STATES[-1]
print ' (3) choosing a node changes the color of the other nodes. '
print ' (4) node colors are %s' % ' -> '.join(STATES)
moves = 0
current_game_state = initial_game_state()
while moves < MAX_ALLOWED_MOVES:
print current_game_state
moves += 1
user_choice = get_user_input()
if user_choice:
current_game_state = next_game_state(current_game_state, user_choice)
if puzzle_solved(current_game_state):
print 'Success! You solved the puzzle in %s moves' % moves
break
| UTF-8 | Python | false | false | 1,961 | py | 33 | light_puzzle_demo.py | 29 | 0.583886 | 0.572157 | 0 | 63 | 30.063492 | 81 |
TylerET/Morrowind-Text-Adventure | 14,061,722,966,963 | d275c47295780583658ef611ed3d133ca0ef4e16 | a4ed2d1a2bb505a8fa0792a3072380878cf93343 | /Import.py | 573eb53675381b6b4c1ce3277aa710e49d324bbd | [] | no_license | https://github.com/TylerET/Morrowind-Text-Adventure | 9d054b71d009fb803d0eef31ba433ab72f558e0f | 2b4ed63c93531bdfb6c034a1247aecbd4bd5e8fb | refs/heads/main | 2023-09-06T03:55:51.350100 | 2021-09-28T18:25:44 | 2021-09-28T18:25:44 | 411,395,213 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
f = open('importNPC.txt', 'r')
w = open('NPCs.py', 'a')
print(f.readline())
for _ in range(25):
line = f.readline().rstrip('\n')
description = ''
name = line
minDmg = ''
maxDmg = ''
hp = ''
magicka = ''
weapon = ''
inventory= ''
className = name.replace(' ','')
    # append a class stub for this NPC to NPCs.py
    w.write('class {}(NPC):\n\tdef __init__(self):\n\t\tsuper().__init__(name=\'{}\', hp=\'{}\', minDmg={}, maxDmg={}, magicka={}, weapon={}, inventory={})\n\n'.format(className, name, hp, minDmg, maxDmg, magicka, weapon, inventory))
f.close()
w.close()
| UTF-8 | Python | false | false | 594 | py | 7 | Import.py | 6 | 0.505051 | 0.501684 | 0 | 24 | 22.5 | 231 |